[
  {
    "path": ".gitignore",
    "content": ".idea\nGemfile.lock\next/nmatrix/dense/daxpy.c\next/nmatrix/dense/dgeco.c\next/nmatrix/dense/dgefa.c\next/nmatrix/dense/dgemm.c\next/nmatrix/dense/dgemv.c\next/nmatrix/dense/dscal.c\next/nmatrix/dense/idamax.c\next/nmatrix/dense/467.c\next/nmatrix/dense/*.f\next/nmatrix/dense/transpose.txt\next/nmatrix/yale/aicm.tex\ntmp/\ntest.c\nspec/*.mtx\n*.so\n*.bundle\n*.bundle.dSYM\n*.log\n*.sw?\n*~\n/tags\n*.gem\nhtml/\ndoc/\ndocs/\npkg/\n.autotest\next/nmatrix_java/vendor/\next/nmatrix_java/target/\next/nmatrix_java/build/\n*.class\n*.jar"
  },
  {
    "path": ".rspec",
    "content": "--color\n--format RSpec::Longrun::Formatter\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: ruby\nsudo: required\ncache: bundler\nos:\n  - linux\n  - osx\nosx_image: xcode7.2\nenv:\n  - USE_ATLAS=1       # This configuration installs ATLAS, builds and tests the nmatrix, nmatrix-atlas, and nmatrix-lapacke gems\n  - USE_OPENBLAS=1    # Installs OpenBLAS and reference LAPACK, builds and tests nmatrix, nmatrix-lapacke\n  - USE_REF=1         # Installs OpenBLAS and reference LAPACK, builds and tests nmatrix, nmatrix-lapacke\n  - NO_EXTERNAL_LIB=1 # No external libraries installed, only nmatrix\nrvm:\n  - 2.0.0-p648\n  - 2.1.8\n  - 2.2.4\n  - 2.3.0\n  - 2.4.2\n  - ruby-head\n  # The latest stable and head versions built by clang\n  - 2.3.0-clang\n  - ruby-head-clang\n  # JRuby versions --- experimental, pending merging Prasun's GSoC project.\n  - jruby-9.0.5.0 # earliest supported version (uncomment when jruby-head is passing)\n  - jruby-head    # latest supported JRuby\n  # Make sure to add exclude lines for new JRuby versions below.\n\nbefore_install: ./travis.sh before_install\n\ninstall: ./travis.sh install\n\nscript: ./travis.sh script\n\n# Define extra configurations to add to the build matrix.\n# The idea here is that the USE_ATLAS=1 option should exercise all the ruby\n# code, so it is the only one we need to test with all versions of ruby.\n# For other configurations we only test with one version of ruby.\nmatrix:\n  exclude:\n    - rvm: jruby-head\n      env: USE_ATLAS=1\n    - rvm: jruby-head\n      env: USE_OPENBLAS=1\n    - rvm: jruby-head\n      env: USE_REF=1\n    - rvm: jruby-9.0.5.0\n      env: USE_ATLAS=1\n    - rvm: jruby-9.0.5.0\n      env: USE_OPENBLAS=1\n    - rvm: jruby-9.0.5.0\n      env: USE_REF=1    \n    # NOTE: The following two ruby versions on OSX are currently unavailable\n    - os: osx\n      rvm: 2.0.0-p648\n    - os: osx\n      rvm: 2.1.8\n    - os: osx\n      rvm: 2.2.4\n    - os: osx\n      rvm: 2.3.0\n    - os: osx\n      rvm: ruby-head\n    - os: osx\n      rvm: 2.3.0-clang\n    - os: osx\n      
rvm: ruby-head-clang\n    # FIXME: The following configuration is unavailable because ATLAS should be built from source.\n    #        We need homebrew formula for ATLAS and its bottle.\n    - os: osx\n      env: USE_ATLAS=1\n    # FIXME: The following configuration takes too long time when installing homebrew/dupes/lapack.\n    #        We need the bottle of lapack formula.\n    - os: osx\n      env: USE_REF=1\n  include:\n    # The latest stable and head versions of ruby built by clang on OSX\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.4.0-dev USE_OPENBLAS=1\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.4.0-dev NO_EXTERNAL_LIB=1\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.3.0 USE_OPENBLAS=1\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.3.0 NO_EXTERNAL_LIB=1\n    # The latest version of Ruby 2.2.x built by clang on OSX\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.2.4 USE_OPENBLAS=1\n    # The latest version of Ruby 2.1.x built by clang on OSX\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.1.8 USE_OPENBLAS=1\n  allow_failures:\n    # trunk\n    - rvm: ruby-head\n    - rvm: ruby-head-clang\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.4.0-dev USE_OPENBLAS=1\n    - os: osx\n      compiler: clang\n      rvm: 2.2\n      env:\n        - ruby_version=2.4.0-dev NO_EXTERNAL_LIB=1\n\nnotifications:\n  irc: \"chat.freenode.net#sciruby\"\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "NMatrix is part of SciRuby, a collaborative effort to bring scientific\ncomputation to Ruby. If you want to help, please do so!\n\nThis guide covers ways in which you can contribute to the development\nof SciRuby and, more specifically, NMatrix.\n\n## How to help\n\nThere are various ways to help NMatrix: bug reports, coding and\ndocumentation. All of them are important.\n\nFirst, you can help implement new features or bug fixes. To do that,\nvisit our [roadmap](https://github.com/SciRuby/nmatrix/wiki/Roadmap)\nor our [issue tracker][2]. If you find something that you want to work\non, post it in the issue or on our [mailing list][1].\n\nYou need to send tests together with your code. No exceptions. You can\nask for our opinion, but we won't accept patches without good spec\ncoverage.\n\nWe use RSpec for testing. If you aren't familiar with it, there's a\ngood [guide to better specs with RSpec](http://betterspecs.org/) that\nshows a bit of the syntax and how to use it properly.  However, the\nbest resource is probably the specs that already exist -- so just read\nthem.\n\nAnd don't forget to write documentation (we use rdoc). It's necessary\nto allow others to know what's available in the library. There's a\nsection on it later in this guide.\n\nWe only accept bug reports and pull requests in GitHub. You'll need to\ncreate a new (free) account if you don't have one already. 
To learn\nhow to create a pull request, please see\n[this guide on collaborating](https://help.github.com/categories/63/articles).\n\nIf you have a question about how to use NMatrix or SciRuby in general\nor a feature/change in mind, please ask the\n[sciruby-dev mailing list][1].\n\nThanks!\n\n## Coding\n\nTo start helping with the code, you need to have all the dependencies in place:\n\n- GCC 4.3+\n- git\n- Ruby 1.9+\n- `bundler` gem\n- ATLAS/LAPACKE/FFTW depending on the plugin you want to change.\n\nNow, you need to clone the git repository:\n\n```bash\n$ git clone git://github.com/SciRuby/nmatrix.git\n$ cd nmatrix\n$ bundle install\n$ rake compile\n$ rake spec\n```\n\nThis will install all dependencies, compile the extension and run the\nspecs.\n\nFor **JRuby**\n\n```bash\n$ mkdir ext/nmatrix_java/vendor\nDownload commons_math.3.6.1 jar and place it in ext/nmatrix_java/vendor directory\n$ mkdir -p ext/nmatrix_java/build/class\n$ mkdir ext/nmatrix_java/target\n$ rake jruby\n```\n\nIf everything's fine until now, you can create a new branch to work on\nyour feature:\n\n```bash\n$ git branch new-feature\n$ git checkout new-feature\n```\n\nBefore committing any code, please read our\n[Contributor Agreement](http://github.com/SciRuby/sciruby/wiki/Contributor-Agreement).\n\n### Guidelines for interfacing with C/C++\n\nNMatrix uses a lot of C/C++ to speed up execution of processes and\ngive more control over data types, storage types, etc. Since we are\ninterfacing between two very different languages, things can get out\nof hand pretty fast.\n\nPlease go through this before you create any C accessors:\n\n* Perform all pre-computation error checking in Ruby.\n* Curate any extra data (cloned objects, trivial computations, etc.) in Ruby.\n* Do _NOT_ resolve VALUE into constituent elements unless they reach the function where the elements are needed or it is absolutely necessary. 
Passing around a VALUE in the C/C++ core is much more convenient than passing around `void*` pointers which point to an array of matrix elements.\n\nBasically, follow a practice of 'once you enter C, never look back!'.\n\nIf you have something more in mind, discuss it in the issue tracker or\non\n[this](https://groups.google.com/forum/#!topic/sciruby-dev/OJxhrGG309o)\nthread.\n\n## C/C++ style guide\n\nThis section is a work in progress.\n\n* Use camel_case notation for arguments. No upper case.\n* Write a brief description of the arguments that your function\n  receives in the comments directly above the function.\n* Explicitly state in the comments any anomalies that your function\n  might have. For example, that it does not work with a certain\n  storage or data type.\n\n## Documentation\n\nThere are two ways in which NMatrix is being documented: guides and\ncomments, which are converted with RDoc into the documentation seen in\n[sciruby.com](http://sciruby.com).\n\nIf you want to write a guide on how to use NMatrix to solve some\nproblem or simply show how to use one of its features, write it as\na wiki page and send an e-mail on the [mailing list][1]. 
We're working\nto improve this process.\n\nIf you aren't familiar with RDoc syntax,\n[this is the official documentation](http://docs.seattlerb.org/rdoc/RDoc/Markup.html).\n\n## Making new nmatrix extensions\n\nFrom version 0.2, NMatrix supports extensions, all of which can be\nhosted from the main nmatrix repo.\n\nRefer to\n[this blog post ](http://wlevine.github.io/2015/06/15/releasing-multiple-gems-with-c-extensions-from-the-same-repository.html)\nto see how to do that in case you want to write your own extension for\nnmatrix.\n\n## Conclusion\n\nThis guide was heavily based on the\n[Contributing to Ruby on Rails guide](http://edgeguides.rubyonrails.org/contributing_to_ruby_on_rails.html).\n\n[1]: https://groups.google.com/forum/?fromgroups#!forum/sciruby-dev\n[2]: https://github.com/sciruby/nmatrix/issues?sort=created&state=open\n"
  },
  {
    "path": "Gemfile",
    "content": "source 'https://rubygems.org'\n\n#main gemspec\ngemspec :name => 'nmatrix'\n\n#plugin gemspecs\nDir['nmatrix-*.gemspec'].each do |gemspec_file|\n  plugin_name = gemspec_file.match(/(nmatrix-.*)\\.gemspec/)[1]\n  gemspec(:name => plugin_name, :development_group => :plugin)\nend\n"
  },
  {
    "path": "History.txt",
    "content": "=== 0.0.1 / 2012-04-10\n\n* 1 major enhancement\n\n  * Initial alpha release\n\n=== 0.0.2 / 2012-09-21\n\n* 15 major enhancements\n\n  * Second alpha release\n\n  * Rewrote NMatrix in C++0x and C++11 using templates, namespaces;\n    removed Ruby generators and CAST parser\n\n  * Added preliminary C API\n\n  * Slicing and referencing support for dense and list matrices (by\n    @flipback)\n\n  * BLAS level-3 xTRSM algorithm added for rationals and BLAS types\n\n  * LAPACK support added, including partially working xGETRF\n    subroutine\n\n  * Element-wise comparisons now return byte-matrices\n\n  * Element-wise operations on list matrices may alter the default\n    value of the return matrix\n\n  * Element-wise division behaves like Ruby division\n\n  * Improved MATLAB .MAT v5 file reading\n\n  * clang support\n\n  * `==` operator now used for matrix equality, `=~` and `!~` for\n    element-wise comparisons\n\n  * Dense `each` returns an Enumerator when called without a block\n\n  * Sped up list storage item deletion, fixed bugs\n\n  * List matrix-to-hash conversion with `to_h`\n\n* Note: Element-wise list operations current disabled\n\n=== 0.0.3 / 2013-01-18\n\n* 8 major enhancements\n\n  * Matrix-scalar operations (dense, list)\n\n  * Shortcuts for matrix creation (by @agarie)\n\n  * Access to most ATLAS-implemented LAPACK functions for those\n    with ATLAS' CLAPACK interface: xGETRF, xGETRI, xGETRS, xGESV,\n    xPOTRF, xPOTRI, xPOTRS, xPOSV, xLASWP, xSCAL, xLAUUM\n\n  * Access to additional ATLAS-implemented BLAS functions: xTRMM,\n    xSYRK, xHERK, xROT, xROTG\n\n  * Non-ATLAS versions of CLAPACK functions: xLASWP, xSCAL, xLAUUM,\n    xROT\n\n  * Matrix inversion (LU and Cholesky; requires CLAPACK)\n\n  * LU factoring with and without CLAPACK\n\n  * Native matrix I/O for dense (supporting upper, lower, hermitian,\n    skew, symmetric, and general) and yale (general only); excludes\n    Ruby objects currently\n\n* 2 bug fixes:\n\n  * 
Yale-to-list casting\n\n  * Now requires packable-1.3.5 or higher, fixing a problem with\n    MATLAB .mat v5 file I/O (specific to doubles)\n\n=== 0.0.4 / 2013-05-17\n\n* 3 major enhancements\n\n  * Added a more user-friendly interface for cblas_rot in the form of\n    NMatrix::BLAS::rot\n\n  * Added to_hash for Yale matrices\n\n  * Improved source code documentation (by @agarie)\n\n* 4 minor enhancements\n\n  * Spec clean-up (by @masaomi)\n\n  * Made it possible to request a different itype internally for Yale\n    matrices\n\n  * Improved space usage of Yale slice-by-copying, which was\n    requesting more space than needed\n\n  * Improved compile-time Mac OS X and Ubuntu library searching\n\n* 8 bug fixes:\n\n  * NMatrix::BLAS::gemv segfaults\n\n  * Fixed Yale matrix slice-by-copy write error where default itypes\n    (which are based on shape) differ, and a separate problem where\n    incorrect IJA and A entries were written.\n\n  * NVector-scalar operations and NVector-NVector element-wise\n    options now return an NVector instead of an NMatrix\n\n  * Addressed problems with segmentation faults during iteration (by\n    @cjfuller)\n\n  * Addressed Ubuntu/Debian installation problems (incompatibility\n    with apt-supplied atlas)\n\n  * Fixed transpose behavior following slice-by-reference (by\n    @cjfuller)\n\n  * Fixed gem install command in Rakefile (by @jpmckinney)\n\n  * Fixed Spanish language compile issue (by @imcsk8 and @agarie)\n\n=== 0.0.5 / 2013-07-09\n\n* 4 major enhancements\n\n  * NVector orientation is now controlled by its shape, not by the\n    @orientation property\n\n  * NVector default orientation is now a row vector rather than a\n    column, as this is more efficient for Yale storage\n\n  * NVector objects may now be created with dtypes other than dense\n\n  * Exposure of additional ATLAS-implemented BLAS functions,\n    including native rational and Ruby object support, for xANUM (sum\n    of the absolute values of a vector) and xNRM2 
(2-norm of a\n    vector); and Ruby helper functions BLAS::anum and BLAS::nrm2\n    which should do more careful argument sanity checking\n\n* 9 minor enhancements\n\n  * Added #yale_vector_insert to NMatrix::YaleFunctions, to speed up\n    insertion of multiple items into a Yale matrix\n\n  * Added #yale_nd_row, #yale_nd_row_as_hash, #yale_nd_row_as_array,\n    #yale_nd_row_as_set, #yale_nd_row_as_sorted_set, #yale_row,\n    #yale_row_as_hash, #yale_row_as_array, #yale_row_as_set,\n    #yale_row_as_sorted_set, #yale_nd_row_size to\n    NMatrix::YaleFunctions in order to speed up getting multiple\n    items from some row of a Yale matrix\n\n  * Improved #yale_ija, #yale_a, #yale_d by allowing an optional\n    index argument, which returns a single array value instead of\n    copying and returning the entire array\n\n  * Improved sorting algorithm for Yale matrix multiplication;\n    instead of selection sort, now uses quicksort; and subs in\n    insertion sort for small partitions\n\n  * Slicing a single row or column now returns an NVector instead\n    of an NMatrix (does not yet work for n-dimensional matrices)\n\n  * Improved function documentation for NVector and NMatrix\n\n  * Added #min, #max, #to_a, #shuffle, #shuffle!, #absolute_sum,\n    #norm2 functions to NVector\n\n  * Aliased missing dimension of NVector#each_stored_with_indices to\n    #each_stored_with_index, which only yields a value and i or j\n    (not both i and j) depending on the vector orientation\n\n  * Added #each_row, #each_column to NMatrix\n\n* 5 bug fixes:\n\n  * Dense iterators now return self (an NMatrix) in order to be\n    consistent with Ruby Array behavior (by @cjfuller)\n\n  * Fixed Yale resize problem (by @v0dro)\n\n  * Fixed Yale nx1 times 1xn multiplication problem\n\n  * Fixed Yale sorting-following-multiplication problem\n\n  * NMatrix::read() now raises an exception when asked to read a file \n    that does not exist\n\n=== 0.0.6 / 2013-08-09\n\n* 8 major enhancements:\n\n  
* Refactored iteration, so that each storage type now has each of:\n    #each, #each_with_indices, #each_stored_with_indices\n\n  * Added element-wise power function (**) for dense matrices (by\n    @agarie)\n\n  * Dramatically improved matrix element-wise and scalar functions so\n    C++ templates are no longer necessary; element-wise operations\n    may now be written in protected Ruby methods that look like\n    NMatrix#__list_elementwise_op__ and NMatrix#__list_scalar_op__\n\n  * Element-wise and scalar operations that might return a true or\n    false now return Ruby matrices\n\n  * Yale element-wise and scalar operations have been added\n\n  * Yale is now allowed to have a non-zero default (specifically to\n    permit true-false matrices and nil sparse bases)\n\n  * Dramatically improved NMatrix#cast to allow for hashed options\n    including a :default for list and yale casts\n\n  * Dramatically improved speed of compilation\n\n* 14 minor enhancements:\n\n  * Improved documentation for exposed BLAS and LAPACK functions\n\n  * Allowed for use of BLAS::rot without cloning x and y (in-place\n    plane rotation); removed unnecessary test of unfriendly version\n\n  * Added more user-friendly cblas_xrotg implementation: BLAS::rotg\n\n  * Moved NMatrix::YaleFunctions::yale_vector_insert to\n    NMatrix#__yale_vector_set__, which is more consistent with\n    behavior\n\n  * Changed notations for protected stype-specific functions, which\n    now look like __stype_method_name__, e.g., __yale_vector_set__\n\n  * Added NMatrix#__list_default_value__ protected function to get\n    the initial (sparse) value for list matrices\n\n  * Changed behavior and names of NMatrix::YaleFunctions methods\n    which get column indices and cell contents by row, so that they\n    now expect the :keys option (like Hash#keys) instead of :array,\n    which doesn't make sense; name changes are as follows:\n      yale_row_as_sorted_set -> yale_ja_d_keys_sorted_set_at\n      yale_row_as_set 
-> yale_ja_d_keys_set_at\n      yale_row_as_array -> yale_ja_d_keys_at\n      yale_nd_row_as_sorted_set -> yale_ja_sorted_set_at\n      yale_nd_row_as_set -> yale_ja_set_at\n      yale_nd_row_as_array -> yale_ja_at\n    Aliases are included but will be removed without notice.\n\n  * Added NVector#sorted_indices and #binned_sorted_indices for use\n    when running k-nearest neighbor searches on a distance matrix\n\n  * Added NVector::logspace shortcut function (analogous to\n    NVector::linspace)\n\n  * Cleaned up code by removing monkey patches that we stopped using\n    circa v0.0.2 (Array#min, Array#max, String#constantize,\n    String#camelize, String#underscore)\n\n  * Re-enabled element-wise mod (%) method\n\n  * Added NMatrix::guess_dtype class method, which allows you to\n    figure out what dtype (roughly) should be used for any given\n    Ruby value (e.g., 3)\n\n  * String and nil objects in NMatrix cells are no longer outlawed\n    (but are not supported), provided they are of the :object dtype\n\n  * NMatrix#diag shortcut for specifying sparse matrix with a user-\n    specified diagonal array (by @ryanmt)\n\n* 3 bug fixes:\n\n  * Corrected BLAS::rot bounds checking on optional n argument\n\n  * Removed BLAS::rotg and BLAS::nrm2 code for rational numbers, as\n    both involve a square root\n\n  * Repaired list matrix element-wise functions\n\n=== 0.0.7 / 2013-08-22\n\n* 6 major enhancements:\n\n  * Eliminated NVector in favor of NMatrix objects with\n    #effective_dim smaller than #dim; added NVector-like\n    functionality to NMatrix, sometimes with #respond_to? 
dependent\n    upon matrix dimensions; allowed for NVector.new to continue\n    to function as before, but now returns an NMatrix instead\n\n  * Began major re-factoring of headers for math.cpp\n\n  * Added two singular value decomposition functions for dense\n    NMatrix objects, #gesvd and #gesdd, for floating point and\n    complex dtypes\n\n  * Added additional slicing shorthand, which uses hashes (e.g.,\n    n[0=>3,2] for n[0..3,2]), which may eventually allow users to use\n    n[0:3,2] notation instead (needs Ruby core patch)\n\n  * #[] and #[]= calls no longer need index on those matrix shape\n    elements which are equal to 1 (e.g., vectors)\n\n  * Yale slicing-by-reference has been added\n\n* 18 minor enhancements:\n\n  * Added user-friendly NMatrix::LAPACK::laswp method\n\n  * Added NMatrix#permute_columns! and NMatrix#permute_columns\n\n  * Added NMatrix#abs to compute element-wise absolute values, and\n    #abs_dtype to determine the dtype returned by a call to #abs on a\n    given matrix (needed for RSpec)\n\n  * Added NMatrix#size to compute the total number of cells in an\n    NMatrix object (solely accounting for the shape, not sparsity)\n\n  * Added RSpec monkey patches for #be_within to work with NMatrix\n    objects; usable by requiring \"nmatrix/rspec\"\n\n  * Added experimental NMatrix::LAPACK::lapack_gesvd function (which\n    does NOT depend upon CLAPACK) (by @ryanmt and @mohawkjohn)\n\n  * Added experimental non-LAPACK-dependent function\n    NMatrix::LAPACK::lapack_gesdd\n\n  * Added NMatrix#supershape method for getting the shape of a\n    slice's parent or other ancestor, which may be useful for calling\n    ATLAS and LAPACK functions on slices\n\n  * Aliased NMatrix[] to function like N[] shortcut for matrix\n    creation (by @agarie)\n\n  * Added #layer for matrices with dimension greater than two\n    (corresponds to #row and #column)\n\n  * Added #rank and #each_rank generics for #row/#column/#layer and\n    
#each_row/#each_column/#each_layer respectively (#each_rank\n    replaces #each_along_dim)\n\n  * Replaced #reduce_along_dim with #inject_rank\n\n  * NMatrix#to_a now works for up to two dimensional matrices; and\n    returns a flattened array for single-row matrices\n\n  * NMatrix#to_flat_a now returns a flattened array of all entries\n\n  * Re-organized NMatrix Ruby sources into multiple files: math.rb\n    for instance methods which are mathematical in nature or are\n    essentially ATLAS/LAPACK/BLAS calls, enumerate.rb for methods\n    involving enumeration; and existing shortcuts.rb for convenience\n    functions and shortcut constructors, and nmatrix.rb for core\n    functionality (#inspect, #to_h, #to_a, #to_s, #pretty_print,\n    and so on)\n\n  * Improved #pretty_print, which now also prints layers (the third\n    dimension in a 3D matrix)\n\n  * Re-factored some of dense slicing to remove some redundant code\n\n  * Added shortcut functions #list?, #dense?, #yale? for quickly\n    testing matrix storage type\n\n* 5 bug fixes:\n\n  * Fixed compilation problem involving <typeinfo> and <vector> STL\n    headers\n\n  * Fixed NMatrix#inverse problem with non-square matrices\n\n  * Fixed invalid read problem detected by Valgrind for\n    Yale element-wise subtraction in spec\n\n  * Fixed conversion from Ruby object to Complex and Rational\n\n  * Fixed memory leak in slicing\n\n=== 0.0.8 / 2013-08-23\n\n* 2 bug fixes:\n\n  * Fixed Ubuntu compilation bug caused by math header file\n    refactoring\n\n  * Fixed pry version error which manifests on some systems but not\n    others\n\n=== 0.0.9 / 2013-09-18\n\n* 5 major enhancements:\n\n  * Re-factored NMatrix constructor\n\n  * Improved usability of NMatrix shortcut constructor options\n    (e.g., #zeros, #ones, #random, etc.) 
using new NMatrix\n    constructor\n\n  * Left-assignment of slices for all matrix storage types (uses a\n    dense cast, or accepts an array or single value)\n\n  * Re-factored Yale into a more object-oriented and less confusing\n    set of header files\n\n  * Enabled Travis CI (by @cjfuller)\n\n* 4 minor enhancements:\n\n  * Renamed some specs in order to change their test order, so that\n    critical tests fail first (particularly in the case of\n    segmentation faults)\n\n  * Default dtype is now :object when no initial values are\n    specified\n\n  * Deprecated NVector#initialize and a number of other unnecessary\n    NVector functionality\n\n  * Made Ubuntu compilation significantly easier (by @cjfuller)\n\n* 2 bug fixes:\n\n  * nil values in matrices are now pretty printed as \"nil\"\n\n  * Casting from dense to Yale now properly accepts the default\n    value option\n\n=== 0.1.0.rc1 / 2013-12-28\n\n* 4 major enhancements:\n\n  * Improved garbage collection strategy for partial object creation\n    (i.e., when VALUEs are allocated but not registered right away),\n    which in addition to fixing numerous bugs should prevent some new\n    bugs from arising in the future (by @cjfuller)\n\n  * Implemented list storage transpose\n\n  * Implemented generic n-dimensional transpose\n\n  * Implemented == comparison between differing matrix stypes\n\n* 9 minor enhancements:\n\n  * User-friendly #gesvd and #gesdd updates (by @ryanmt)\n\n  * Added experimental #yale_row_key_intersection function for expert\n    recommendation problems\n\n  * Added additional *indgen shortcuts and changed behavior for some;\n    now, #cindgen for :complex64, #zindgen for :complex128, #findgen\n    for :float32, #dindgen for :float64, #rindgen for :rational128,\n    and #rbindgen for Ruby objects (which contain integers); also,\n    removed code repetition\n\n  * Changed #stddev to use elementwise #sqrt instead of a manual map\n    block (by @cjfuller)\n\n  * Added alias from MATLAB 
`load_mat` method to `load` for\n    consistency with the MatrixMarket loader\n\n  * Improved organization by moving list and yale code into storage/\n    subdirectories\n\n  * Added NMatrix#potrf! and NMatrix#getrf, which are instance methods\n    for calling CLAPACK functions (NMatrix#getrf! already existed)\n\n  * Added GCC installation instructions for Mac OS X Mavericks, and\n    updated the old installation instructions for Mac OS X (both\n    found in scripts/)\n\n  * Switched NMatrix::VERSION to work more like Rails::VERSION, with\n    support for MAJOR, MINOR, TINY, and PRE\n\n  * Added #concat, #hconcat, #vconcat, and #dconcat for joining\n    matrices together\n\n* 16 bug fixes:\n\n  * Spec revisions for lapack_gesdd and lapack_gesvd (by @ryanmt)\n\n  * Fixed two double-free problems (by @cjfuller and @mohawkjohn)\n\n  * Fixed contiguous array marking fencepost error\n\n  * Fixed C/C++ API compatibility problem preventing rb/gsl linking\n\n  * Resolved a number of compiler warnings, including one return-type\n    problem that would likely have become a garbage collection error\n    (if it wasn't already)\n\n  * Fixed -O3 optimization problems\n\n  * Restored NMatrix#asum, #nrm2, #binned_sorted_indices,\n    #sorted_indices which were inadvertantly removed by NVector\n    deprecation; have not tested\n\n  * Experimental #yale_nd_row and functions which call it now checks\n    range of argument to prevent segfault\n\n  * Fixed :* shortcut for a full list dimension (by @cjfuller)\n\n  * Fixed list construction problem which occurred when an initial\n    value array was provided (by @cjfuller)\n\n  * Fixed #inject issue with list and yale matrices of two dimensions\n    (by @cjfuller)\n\n  * Fixed several garbage collection problems (also listed under\n    enhancements) (by @cjfuller)\n\n  * Updated object cleaning target in extconf.rb\n\n  * Fixed possible compilation problem on Mavericks with Xcode 5.02\n\n  * Fixed errors involving undefined symbols, 
unresolved symbols, and\n    lazy symbol binding\n\n  * Improved LAPACK and BLAS header selection for Ubuntu/Debian\n    systems with ATLAS (by @mvz)\n\n=== 0.1.0.rc2 / 2014-03-12\n\n* No major enhancements.\n\n* 14 minor enhancements:\n\n  * Implemented negative-index slicing (by @rajatkapoor)\n\n  * Added reader for Point Cloud Library's PCD format\n\n  * Added Ruby 2.1 support (including Travis CI testing)\n\n  * Implemented LAPACK-independent exact inverse calculation for\n    dense matrices of size 2x2 and 3x3, as well as\n\n  * Added NMatrix::has_clapack? method to determine whether CLAPACK\n    support has been compiled in\n\n  * Improved conformance of specs to RSpec best practices (by \n    @duggiefresh)\n\n  * Travis CI now updates the IRC channel when a check passes (by\n    @agarie)\n\n  * Added NMatrix#data_pointer, which returns the memory address of\n    the stored elements in a matrix (generally for use with FFI and\n    other libraries that need pointers)\n\n  * Made NMatrix#clone_structure a public method (was protected)\n\n  * Added :scale option for NMatrix::random to handle non-floating\n    point forms\n\n  * Added complex support to NMatrix::random\n\n  * Aliased NMatrix::random to NMatrix::rand\n\n  * Added NMatrix#reshape! 
for in-place reshape of dense matrices (by\n    @rajatkapoor)\n\n  * Implemented unary negation of matrices\n\n* 6 bug fixes:\n\n  * Fixed dot product operation on 1-dimensional matrices (by @rve\n    and @cjfuller)\n\n  * Fixed segfault on 1-dimensional matrix transpose (by @cjfuller)\n\n  * Fixed compile error with Ruby 2.1 (by @diminish7)\n\n  * Fixed regression which wasn't causing any problems but was\n    counter to design: stride was declared prior to data storage for\n    dense matrix storage\n\n  * Fixed Rakefile problem which was causing specs to run twice in a\n    row with each call to rake spec\n\n  * NMatrix::random now raises an exception when rational matrices\n    are requested\n\n=== 0.1.0.rc3 / 2014-03-27\n\n* No major enhancements.\n\n* 2 minor enhancements:\n\n  * Exposed NMatrix::LAPACK::geev for LAPACK's xGEEV\n\n  * Added out-of-place complex conjugate for dense and yale storage \n    (by @rve)\n\n* 1 bug fixes:\n\n  * Fixed critical bug with transposing a matrix reference slice (by\n    @rajatkapoor)\n\n=== 0.1.0.rc4 / 2014-07-24\n\n* No major enhancements.\n\n* 1 minor enhancement:\n\n  * NMatrix#floor and #ceil implemented (by @v0dro)\n\n* 2 bug fixes:\n\n  * Disallowed out-of-bounds rank calls (by @andrewcsmith)\n\n  * Fixed rspec 3.0 conflict with rspec-longrun 1.0.1\n\n=== 0.1.0.rc5 / 2014-08-01\n\n* No major enhancements.\n\n* 1 minor enhancements:\n\n  * Added optional extension for generating homogeneous\n    transformation matrices for rotations and translations in three\n    dimensions\n\n* 3 bug fixes:\n\n  * Fixed rake install (by @duggiefresh)\n\n  * Fixed API problems which prevented NMatrix from working with\n    the SciRuby rb-gsl fork\n\n  * Fixed Yale #inject behavior (by @yoongkang)\n\n=== 0.1.0 / 2014-12-11\n\n* 3 major enhancements:\n\n  * Updated to BSD 3-clause license\n\n  * Removed Ruby 1.9.2 support; now requires Ruby 1.9.3 or \n    higher (by @v0dro)\n\n  * Added Gauss-Jordan elimination for calculation of\n    
matrix inverses (by @v0dro)\n\n* 6 minor enhancements:\n\n  * Added trace method for square matrices\n\n  * Improved Array#to_nm monkey patch so matrices and\n    arrays can be interconverted easily, without need for a\n    shape argument (by @andrewcsmith)\n\n  * Added Harwell-Boeing and Fortran matrix format parsers\n    (by @v0dro)\n\n  * Removed soon-to-be-deprecated autoloads and replaced\n    with a more robust method (by @duggiefresh)\n\n  * Updated rake install task to use Bundler's GemHelper\n    install task (by @duggiefresh)\n\n  * Moved packable requirement from Gemfile to gemspec\n    (by @andrewcsmith)\n\n* 3 bug fixes:\n\n  * Corrected Ubuntu clapack functionality checking, which\n    should fix most functions which rely on the ATLAS\n    version of clapack\n\n  * Corrected NMatrix::gesdd workspace request size (by\n    @yoongkang)\n\n  * Fixed definition of NMatrix#asum for one-by-one\n    matrices (by @andrewcsmith)\n\n=== 0.2.0 / 2015-08-24\n\n* 2 major enhancements:\n\n  * External libraries are now linked via optional plugins,\n    removing ATLAS dependencies (by @wlevine)\n\n  * Made it possible to use NMatrix together with NArray (by\n    @blackwinter)\n\n* 9 minor enhancements:\n\n  * Removed rational types (by @wlevine)\n\n  * Added block-diagonal matrix generation method,\n    NMatrix.block_diagonal (by @agisga)\n\n  * Added Kronecker product method #kron_prod (by @agisga)\n\n  * Made #permute_columns usage more intuitive (@agisga)\n\n  * Added #pow method to raise a matrix to an integer power\n    (by @v0dro)\n\n  * Added #meshgrid method (by @dilcom)\n\n  * Added #hessenberg method, for reducing matrices to upper\n    Hessenberg form (by @v0dro)\n\n  * Added calculation of correlation matrix with #corr and\n    covariance using #cov (by @v0dro)\n\n  * Added method for returning matrix diagonal, #diagonal\n    (by @v0dro)\n\n* 11 bug fixes:\n\n  * Fixed #== operator problems (by @wlevine)\n\n  * Fixed BLAS.gemv (by @wlevine)\n\n  * Fixed 
#asum for single element complex matrices (by\n    @wlevine)\n\n  * Fixed determinant calculation (by @wlevine)\n\n  * Fixed division by zero (by @spacewander)\n\n  * Fixed NMatrix#respond_to? so it would accept two\n    arguments properly (by @ktns)\n\n  * Fixed NMatrix#hermitian? (by @agisga)\n\n  * Fixed #gesdd, #gesvd (by @wlevine)\n\n  * Fixed #symmetric? (by @ktns)\n\n  * Made rdoc a development dependency to resolve dependency\n    conflicts (by @matt-liu)\n\n  * Fixed bug where Array#to_nm would alter the array (by\n    @andrewcsmith)\n\n=== 0.2.1 / 2016-01-18\n\n* 3 major enhancements:\n  \n  * New plugin nmatrix-fftw for wrapping over FFTW (by\n     @v0dro)\n  \n  * Change Ruby Array C pointer references to be compatible\n    with Ruby 2.3.0 (by @mrkn)\n\n  * NMatrix can now be compiled with clang (by @mrkn)\n\n* 4 minor enhancements: \n\n  * Improved Travis configs to test on Linux and OSX with\n    and without plugins (by @mrkn)\n\n  * Added non-abbreviated versions to the options of\n    NMatrix#solve; added more docs (by @agisga)\n\n  * Added several specialized algorithms to NMatrix#solve\n    for more efficient solving of linear systems with upper\n    or lower triangular matrices (by @agisga)\n\n  * Remove redundant C implementation of\n    NMatrix#complex_conjugate (by @yoongkang)\n\n* 4 bug fixes:\n\n  * Fixed memory leak in math.cpp (inverse()) (by @lokeshh)\n\n  * Check if optional permute parameter in NMatrix#transpose\n    is an Array to prevent unexpected disappearing-parameter\n    behavior (by @firemind)\n    \n  * Moved rubyobj_from_cval function into `nm` namespace\n    from C-linkage to fix a C compile time error (by @mrkn)\n\n  * Fixed undefined variable 'idefaults' in lapacke extconf\n    (by @agisga)\n\n=== 0.2.2 / 2016-07-22\n\n* No major enhancements.\n\n* 15 minor enhancements:\n\n  * Added Hilbert and inverse Hilbert matrix functions\n    #hilbert and #inv_hilbert (by @arafatk)\n\n  * Added NMatrix.linspace constructor for 
generating a \n    vector with linearly spaced elements (by @gau27)\n\n  * Added NMatrix.logspace constructor for generating a\n    vector with logarithmically spaced elements (by @gau27)\n\n  * Improved Travis configs (by @v0dro)\n\n  * Added C API documentation and included ruby_constants.h\n    in C API include files (by @v0dro)\n\n  * Added #magic function to create magic square matrices\n    (by @shahsaurabh0605)\n\n  * Added NMatrix#last (by @gau27)\n\n  * Added QR factorization by exposing LAPACK functions\n    GEQRF, ORMQR, UNMQR (by @gau27)\n\n  * Made templates a little smarter for those functions\n    which require a separate return dtype by adding the\n    MagnitudeDType template typename; and added a magnitude\n    function to replace std::abs and abs to make complex\n    and real templates more generic (by @mohawkjohn)\n\n  * Added #adjugate and #adjugate! functions (by @sujithvm)\n\n  * Added #scale and #scale! methods by exposing BLAS SCAL\n    (by @lds56)\n\n  * Re-factored type comparisons to use RB_TYPE_P instead of\n    TYPE(obj) (by @mrkn)\n\n  * Updated license to BSD 3-clause (by @gau27)\n\n  * Cleaned up gem installation settings and dependencies\n    (by @mrkn)\n\n  * DRYed up extconf script (by @mrkn)\n\n* 15 bug fixes:\n\n  * Fixed offsets and changed limits in TRSM to follow the\n    Fortran implementation (by @lokeshh), and adjusted\n    triangular #solve accordingly (by @agisga)\n\n  * Fixed NRM2 (CBLAS 2-norm) for complex types (by\n    @lokeshh)\n\n  * Fixed NRM2 divide-by-zero bug (by @arafatk)\n\n  * Fixed #reshape! 
to work when changing dimensionality (by\n    @wlevine)\n\n  * Fixed ambiguous references by making proper use of\n    namespace qualifiers for dtypes, allowing compilation in\n    Windows using the mingw64 toolchain available through\n    msys2 (by @preetpalS)\n\n  * Replaced all uses of u_int8_t with uint8_t and added static\n    assertions to prevent ambiguous use of u_intX_t types\n    (by @preetpalS)\n\n  * Added workaround in extconf script for Windows use of a\n    different name for the null device (by @preetpalS)\n\n  * Updated deprecated RSpec code and other miscellaneous\n    cleanups (by @yui-knk)\n\n  * Removed incomplete support of Hash as an argument for\n    NMatrix#[] (by @yui-knk)\n\n  * Fixed typo in slicing exception error message (by\n    @mohawkjohn)\n\n  * Fixed #concat implementation for case of differing\n    sizes along concatenation dimension (by @alno)\n\n  * Ensured dtype is preserved by #repeat (by @alno)\n\n  * Fixed #det_exact for :object dtype (by @isuruf)\n\n  * Stopped using deprecated register storage class\n    specifier (by @mrkn)\n\n  * Fixed clang/clang++ compiler selection by forcing use of\n    clang++ when clang is used (by @mrkn)\n\n=== 0.2.3 / 2016-07-25\n\n* No major enhancements.\n\n* No minor enhancements.\n\n* 1 bug fix:\n\n  * Fixed gem installation problem caused by mkmf\n    abstraction (by @mrkn)\n\n=== 0.2.4 / 2017-12-14\n\n* No major enhancements.\n\n* 2 minor enhancements:\n\n  * Eliminated code reuse in math.rb between JRuby and MRI\n    versions of library (by @prasunanand)\n\n  * Slightly simplified #positive_definite? (by\n    @prasunanand)\n\n* 2 bug fixes:\n\n  * Fixed compilation problem on Mac OS X High Sierra (by\n    @mohawkjohn)\n\n  * Fixed failing #block_diagonal spec (due to missing\n    Array#sum) (by @mohawkjohn)\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "This version of NMatrix is licensed under the BSD 3-clause license.\n\n* http://sciruby.com\n* http://github.com/sciruby/sciruby/wiki/License\n\nYou *must* read the Contributor Agreement before contributing code to the SciRuby Project. This is available online:\n\n* http://github.com/sciruby/sciruby/wiki/Contributor-Agreement\n\n-----\n\nCopyright (c) 2010 - 2015, John Woods and the Ruby Science Foundation\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "Manifest.txt",
    "content": "History.txt\nManifest.txt\nREADME.rdoc\nLICENSE.txt\nCONTRIBUTING.md\nRakefile\nGemfile\nnmatrix.gemspec\nnmatrix-atlas.gemspec\nnmatrix-lapacke.gemspec\nnmatrix-fftw.gemspec\ntravis.sh\n.travis.yml\nlib/nmatrix.rb\nlib/nmatrix/atlas.rb\nlib/nmatrix/blas.rb\nlib/nmatrix/enumerate.rb\nlib/nmatrix/homogeneous.rb\nlib/nmatrix/math.rb\nlib/nmatrix/mkmf.rb\nlib/nmatrix/monkeys.rb\nlib/nmatrix/nmatrix.rb\nlib/nmatrix/shortcuts.rb\nlib/nmatrix/version.rb\nlib/nmatrix/yale_functions.rb\nlib/nmatrix/fftw.rb\nlib/nmatrix/lapack_core.rb\nlib/nmatrix/lapack_ext_common.rb\nlib/nmatrix/lapack_plugin.rb\nlib/nmatrix/lapacke.rb\nlib/nmatrix/rspec.rb\nlib/nmatrix/io/market.rb\nlib/nmatrix/io/mat5_reader.rb\nlib/nmatrix/io/mat_reader.rb\nlib/nmatrix/io/point_cloud.rb\nlib/nmatrix/io/fortran_format.rb\nlib/nmatrix/io/harwell_boeing.rb\nlib/nmatrix/cruby/math.rb\nlib/nmatrix/jruby/decomposition.rb\nlib/nmatrix/jruby/enumerable.rb\nlib/nmatrix/jruby/error.rb\nlib/nmatrix/jruby/math.rb\nlib/nmatrix/jruby/nmatrix_java.rb\nlib/nmatrix/jruby/operators.rb\nlib/nmatrix/jruby/slice.rb\next/nmatrix/math/cblas_enums.h\next/nmatrix/math/cblas_templates_core.h\next/nmatrix/math/util.h\next/nmatrix/math/magnitude.h\next/nmatrix_atlas/extconf.rb\next/nmatrix_atlas/math_atlas.cpp\next/nmatrix_atlas/math_atlas/cblas_templates_atlas.h\next/nmatrix_atlas/math_atlas/clapack_templates.h\next/nmatrix_atlas/math_atlas/geev.h\next/nmatrix_atlas/math_atlas/gesdd.h\next/nmatrix_atlas/math_atlas/gesvd.h\next/nmatrix_atlas/math_atlas/inc.h\next/nmatrix_atlas/nmatrix_atlas.cpp\next/nmatrix_java/nmatrix/data/Complex.java\next/nmatrix_java/nmatrix/math/MathHelper.java\next/nmatrix_java/nmatrix/storage/dense/ArrayComparator.java\next/nmatrix_java/nmatrix/util/ArrayGenerator.java\next/nmatrix_java/nmatrix/util/MatrixGenerator.java\next/nmatrix_java/nmatrix/util/WrapperType.java\next/nmatrix_java/test/AssertTests.java\next/nmatrix_java/test/TestRunner.java\next/nmatrix_lapacke/extconf.rb\next/nmatrix_l
apacke/lapacke.cpp\next/nmatrix_lapacke/lapacke/include/lapacke.h\next/nmatrix_lapacke/lapacke/include/lapacke_config.h\next/nmatrix_lapacke/lapacke/include/lapacke_mangling.h\next/nmatrix_lapacke/lapacke/include/lapacke_mangling_with_flags.h\next/nmatrix_lapacke/lapacke/include/lapacke_utils.h\next/nmatrix_lapacke/lapacke/src/lapacke_cgeev.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgeev_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgesdd.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgesdd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgesvd.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgesvd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgetrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgetrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgetri.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgetri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgetrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgetrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cpotrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_cpotrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cpotri.c\next/nmatrix_lapacke/lapacke/src/lapacke_cpotri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cpotrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_cpotrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgeev.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgeev_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgesdd.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgesdd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgesvd.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgesvd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgetrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgetrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgetri.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgetri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgetrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgetrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dpotrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_dpotrf_work.c\next/nmatrix_lapacke/lapacke
/src/lapacke_dpotri.c\next/nmatrix_lapacke/lapacke/src/lapacke_dpotri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dpotrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_dpotrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgeev.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgeev_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgesdd.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgesdd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgesvd.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgesvd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgetrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgetrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgetri.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgetri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgetrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgetrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_spotrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_spotrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_spotri.c\next/nmatrix_lapacke/lapacke/src/lapacke_spotri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_spotrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_spotrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgeev.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgeev_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgesdd.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgesdd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgesvd.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgesvd_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgetrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgetrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgetri.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgetri_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgetrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgetrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zpotrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_zpotrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zpotri.c\next/nmatrix_lapacke/lapacke/src/lapacke_zpotri_work.c\next/nmatrix_lapacke/lapacke/src/lapa
cke_zpotrs.c\next/nmatrix_lapacke/lapacke/src/lapacke_zpotrs_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgeqrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_cgeqrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_cunmqr.c\next/nmatrix_lapacke/lapacke/src/lapacke_cunmqr_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgeqrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_dgeqrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_dormqr.c\next/nmatrix_lapacke/lapacke/src/lapacke_dormqr_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgeqrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_sgeqrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_sormqr.c\next/nmatrix_lapacke/lapacke/src/lapacke_sormqr_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgeqrf.c\next/nmatrix_lapacke/lapacke/src/lapacke_zgeqrf_work.c\next/nmatrix_lapacke/lapacke/src/lapacke_zunmqr.c\next/nmatrix_lapacke/lapacke/src/lapacke_zunmqr_work.c\next/nmatrix_lapacke/lapacke/utils/lapacke_c_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_d_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_s_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_z_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_cge_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_cge_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_cpo_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_cpo_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_ctr_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_ctr_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_dge_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_dge_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_dpo_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_dpo_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_dtr_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_dtr_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_lsame.c\next/nmatrix_lapacke/lapacke/utils/lapacke_sge_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_sge_trans.c\next/nmatrix_lapacke/lapac
ke/utils/lapacke_spo_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_spo_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_str_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_str_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_xerbla.c\next/nmatrix_lapacke/lapacke/utils/lapacke_zge_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_zge_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_zpo_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_zpo_trans.c\next/nmatrix_lapacke/lapacke/utils/lapacke_ztr_nancheck.c\next/nmatrix_lapacke/lapacke/utils/lapacke_ztr_trans.c\next/nmatrix_lapacke/lapacke_nmatrix.h\next/nmatrix_lapacke/make_lapacke_cpp.rb\next/nmatrix_lapacke/math_lapacke.cpp\next/nmatrix_lapacke/math_lapacke/cblas_local.h\next/nmatrix_lapacke/math_lapacke/cblas_templates_lapacke.h\next/nmatrix_lapacke/math_lapacke/lapacke_templates.h\next/nmatrix_lapacke/nmatrix_lapacke.cpp\next/nmatrix/data/complex.h\next/nmatrix/data/data.cpp\next/nmatrix/data/data.h\next/nmatrix/data/meta.h\next/nmatrix/data/ruby_object.h\next/nmatrix/storage/common.cpp\next/nmatrix/storage/common.h\next/nmatrix/storage/storage.cpp\next/nmatrix/storage/storage.h\next/nmatrix/storage/dense/dense.cpp\next/nmatrix/storage/dense/dense.h\next/nmatrix/storage/list/list.cpp\next/nmatrix/storage/list/list.h\next/nmatrix/storage/yale/yale.cpp\next/nmatrix/storage/yale/yale.h\next/nmatrix/storage/yale/class.h\next/nmatrix/storage/yale/iterators/base.h\next/nmatrix/storage/yale/iterators/iterator.h\next/nmatrix/storage/yale/iterators/row.h\next/nmatrix/storage/yale/iterators/row_stored.h\next/nmatrix/storage/yale/iterators/row_stored_nd.h\next/nmatrix/storage/yale/iterators/stored_diagonal.h\next/nmatrix/storage/yale/math/transpose.h\next/nmatrix/util/io.cpp\next/nmatrix/util/io.h\next/nmatrix/util/sl_list.cpp\next/nmatrix/util/sl_list.h\next/nmatrix/util/util.h\next/nmatrix/math.cpp\next/nmatrix/math/asum.h\next/nmatrix/math/gemm.h\next/nmatrix/math/gemv.h\next/nmatrix/math/getrf.h
\next/nmatrix/math/getrs.h\next/nmatrix/math/imax.h\next/nmatrix/math/laswp.h\next/nmatrix/math/long_dtype.h\next/nmatrix/math/math.h\next/nmatrix/math/nrm2.h\next/nmatrix/math/rot.h\next/nmatrix/math/rotg.h\next/nmatrix/math/scal.h\next/nmatrix/math/trsm.h\next/nmatrix/nmatrix.cpp\next/nmatrix/nmatrix.h\next/nmatrix/ruby_constants.cpp\next/nmatrix/ruby_constants.h\next/nmatrix/ruby_nmatrix.c\next/nmatrix/types.h\next/nmatrix/nm_memory.h\next/nmatrix/extconf.rb\next/nmatrix_fftw/extconf.rb\next/nmatrix_fftw/nmatrix_fftw.cpp\n"
  },
  {
    "path": "README.rdoc",
    "content": "= NMatrix\n\n{<img src=\"https://badges.gitter.im/SciRuby/nmatrix.svg\" alt=\"Join the chat at https://gitter.im/SciRuby/nmatrix\">}[https://gitter.im/SciRuby/nmatrix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge]\n\nFast Numerical Linear Algebra Library for Ruby\n\n* {sciruby.com}[http://sciruby.com]\n* {Google+}[https://plus.google.com/109304769076178160953/posts]\n* {Google Group - Mailing List}[https://groups.google.com/forum/#!forum/sciruby-dev]\n* {NMatrix Installation wiki}[https://github.com/SciRuby/nmatrix/wiki/Installation]\n* {SciRuby Installation guide}[http://sciruby.com/docs#installation]\n\n{<img src=https://travis-ci.org/SciRuby/nmatrix.png>}[https://travis-ci.org/SciRuby/nmatrix]\n\n{<img src=\"https://codeclimate.com/github/SciRuby/nmatrix.png\" />}[https://codeclimate.com/github/SciRuby/nmatrix]\n\n== Description\n\nNMatrix is a fast numerical linear algebra library for Ruby, with\ndense and sparse matrices, written mostly in C and C++ (and with\nexperimental JRuby support). It is part of the SciRuby project.\n\nNMatrix was inspired by {NArray}[http://narray.rubyforge.org], by Masahiro Tanaka.\n\nSeveral gems are provided in this repository:\n* +nmatrix+\n* +nmatrix-java+\n* +nmatrix-atlas+\n* +nmatrix-lapacke+\n* +nmatrix-fftw+\n\n== Installation\n\nTo install the latest stable version:\n\n    gem install nmatrix\n\nNMatrix was originally written in C/C++, but an experimental JRuby version is also included (see instructions below for JRuby). 
For the MRI (C/C++) version, you need:\n* Ruby 2.0 or later\n* a compiler supporting C++11 (clang or GCC)\n\nTo install the +nmatrix-atlas+ or +nmatrix-lapacke+ extensions, an additional requirement is a\ncompatible LAPACK library.\nDetailed directions for this step can be found\n{here}[https://github.com/SciRuby/nmatrix/wiki/Installation].\n\nIf you want to obtain the latest (development) code, you should generally do:\n\n    git clone https://github.com/SciRuby/nmatrix.git\n    cd nmatrix/\n    gem install bundler\n    bundle install\n    bundle exec rake compile\n    bundle exec rake spec\n\nIf you want to try out the code without installing:\n\n    bundle exec rake pry\n\nTo install:\n\n    bundle exec rake install\n\n=== JRuby\n\nFirst, you need to download Apache Commons Math 3.6.1 (the JAR, which\nyou can find in the binary package). For example, in the NMatrix\ndirectory, do:\n\n    wget https://www.apache.org/dist/commons/math/binaries/commons-math3-3.6.1-bin.tar.gz\n    tar zxvf commons-math3-3.6.1-bin.tar.gz\n    mkdir ext/nmatrix_java/vendor/\n    cp commons-math3-3.6.1/commons-math3-3.6.1.jar ext/nmatrix_java/vendor/\n\nNext, create build directories:\n\n    mkdir -p ext/nmatrix_java/build/class\n    mkdir ext/nmatrix_java/target\n\nFinally, compile and package as jar.\n\n    rake jruby\n\n=== Plugins\n\nThe commands above build and install only the core +nmatrix+ gem.  If\nyou want to build one or more of the plugin gems (+nmatrix-atlas+,\n+nmatrix-lapacke+) in addition to the core nmatrix gem, use the\n<tt>nmatrix_plugins=</tt> option, e.g.  <tt>rake compile\nnmatrix_plugins=all</tt>, <tt>rake install nmatrix_plugins=atlas</tt>,\n<tt>rake clean nmatrix_plugins=atlas,lapacke</tt>. Each of these\ncommands apply to the +nmatrix+ gem and any additional plugin gems\nspecified. 
For example, <tt>rake spec nmatrix_plugins=atlas</tt> will\ntest both the core +nmatrix+ gem and the +nmatrix-atlas+ gem.\n\n=== Upgrading from NMatrix 0.1.0\n\nIf your code requires features provided by ATLAS (Cholesky\ndecomposition, singular value decomposition, eigenvalues/eigenvectors,\ninverses of matrices bigger than 3-by-3), your code now depends on the\n+nmatrix-atlas+ gem. You will need to add this a dependency of your\nproject and <tt>require 'nmatrix/atlas'</tt> in addition to\n<tt>require 'nmatrix'</tt>. In most cases, no further changes should\nbe necessary, however there have been a few {API\nchanges}[https://github.com/SciRuby/nmatrix/wiki/API-Changes], please\ncheck to see if these affect you.\n\n== Documentation\n\nIf you have a suggestion or want to add documentation for any class or\nmethod in NMatrix, please open an issue or send a pull request with\nthe changes.\n\nYou can find the complete API documentation {on our\nwebsite}[http://sciruby.com/nmatrix/docs/].\n\n== Examples\n\nCreate a new NMatrix from a ruby Array:\n\n    >> require 'nmatrix'\n    >> NMatrix.new([2, 3], [0, 1, 2, 3, 4, 5], dtype: :int64)\n    => [\n        [0, 1, 2],\n        [3, 4, 5]\n       ]\n\nCreate a new NMatrix using the +N+ shortcut:\n\n    >> m = N[ [2, 3, 4], [7, 8, 9] ]\n    => [\n        [2, 3, 4],\n        [7, 8, 9]\n       ]\n    >> m.inspect\n    => #<NMatrix:0x007f8e121b6cf8shape:[2,3] dtype:int32 stype:dense>\n\nThe above output requires that you have a pretty-print-enabled console\nsuch as Pry; otherwise, you'll see the output given by +inspect+.\n\nIf you want to learn more about how to create a matrix, {read the guide in our wiki}[https://github.com/SciRuby/nmatrix/wiki/How-to-create-an-NMatrix].\n\nAgain, you can find the complete API documentation {on our website}[http://sciruby.com/nmatrix/docs/].\n\n=== Using advanced features provided by plugins\n\nCertain features (see the documentation) require either the\nnmatrix-atlas or the nmatrix-lapacke gem 
to be installed. These can be\naccessed by using <tt>require 'nmatrix/atlas'</tt> or <tt>require\n'nmatrix/lapacke'</tt>. If you don't care which of the two gems is\ninstalled, use <tt>require 'nmatrix/lapack_plugin'</tt>, which will\nrequire whichever one of the two is available.\n\nFast fourier transforms can be conducted with the nmatrix-fftw\nextension, which is an interface to the FFTW C library. Use\n<tt>require 'nmatrix/fftw'</tt> for using this plugin.\n\n== Plugin details\n\n=== ATLAS and LAPACKE\n\nThe +nmatrix-atlas+ and +nmatrix-lapacke+ gems are optional extensions \nof the main +nmatrix+ gem that rely on external linear algebra libraries \nto provide advanced features for dense matrices (singular value \ndecomposition, eigenvalue/eigenvector finding, Cholesky factorization), \nas well as providing faster implementations of common operations like \nmultiplication, inverses, and determinants. +nmatrix-atlas+ requires the\n{ATLAS library}[http://math-atlas.sourceforge.net/], while +nmatrix-lapacke+ \nis designed to work with various LAPACK implementations (including ATLAS). \nThe +nmatrix-atlas+ and +nmatrix-lapacke+ gems both provide similar \ninterfaces for using these advanced features.\n\n=== *FFTW*\n\nThis is plugin for interfacing with the {FFTW library}[http://www.fftw.org].\nIt has been tested with FFTW 3.3.4.\n\nIt works reliably only with 64 bit numbers (or the NMatrix `:float64`\nor `:complex128` data type). 
See the docs for more details.\n\n== NArray compatibility\n\nWhen NArray[http://masa16.github.io/narray/] is installed alongside\nNMatrix, <tt>require 'nmatrix'</tt> will inadvertently load NArray's\n+lib/nmatrix.rb+ file, usually accompanied by the following error:\n\n    uninitialized constant NArray (NameError)\n\nTo make sure NMatrix is loaded properly in the presence of NArray, use\n<tt>require 'nmatrix/nmatrix'</tt> instead of <tt>require\n'nmatrix'</tt> in your code.\n\n== Developers\n\nRead the instructions in +CONTRIBUTING.md+ if you want to help\nNMatrix.\n\n== Features\n\nThe following features exist in the current version of NMatrix (0.1.0.rc1):\n\n* Matrix and vector storage containers: dense, yale, list (more to come)\n* Data types: byte (uint8), int8, int16, int32, int64, float32, float64, complex64, complex128,\n  Ruby object\n* Interconversion between storage and data types\n* Element-wise and right-hand-scalar operations and comparisons for all matrix types\n* Matrix-matrix multiplication for dense (with and without ATLAS) and yale\n* Matrix-vector multiplication for dense (with and without ATLAS)\n* Lots of enumerators (each, each_with_indices, each_row, each_column, each_rank, map, etc.)\n* Matrix slicing by copy and reference (for dense, yale, and list)\n* Native reading and writing of dense and yale matrices\n  * Optional compression for dense matrices with symmetry or triangularity: symmetric, skew, hermitian, upper, lower\n* Input/output:\n  * Matlab .MAT v5 file input\n  * MatrixMarket file input/output\n  * Harwell-Boeing and Fortran file input\n  * Point Cloud Library PCD file input\n* C and C++ API\n* BLAS internal implementations (no library) and external (with nmatrix-lapack or nmatrix-atlas) access:\n  * Level 1: xROT, xROTG (BLAS dtypes only), xASUM, xNRM2, IxAMAX, xSCAL\n  * Level 2: xGEMV\n  * Level 3: xGEMM, xTRSM\n* LAPACK access (with nmatrix-lapack or nmatrix-atlas plugin):\n  * xGETRF, xGETRI, xGETRS, xGESV (Gaussian 
elimination)\n  * xPOTRF, xPOTRI, xPOTRS, xPOSV (Cholesky factorization)\n  * xGESVD, xGESDD (singular value decomposition)\n  * xGEEV (eigenvalue decomposition of asymmetric square matrices)\n* LAPACK-less internal implementations (no plugin or LAPACK needed and working on non-BLAS dtypes):\n  * xGETRF, xGETRS\n* LU decomposition\n* Matrix inversions\n* Determinant calculation for BLAS dtypes\n* Traces\n* Ruby/GSL interoperability (requires {SciRuby's fork of rb-gsl}[http://github.com/SciRuby/rb-gsl])\n* slice assignments, e.g.,\n    x[1..3,0..4] = some_other_matrix\n\n=== Planned features (Short-to-Medium Term)\n\nSee the issues tracker for a list of planned features or to request\nnew ones.\n\n== License\n\nCopyright (c) 2012--17, John Woods and the Ruby Science Foundation.\n\nAll rights reserved.\n\nNMatrix, along with SciRuby, is licensed under the BSD 3-clause\nlicense. See {LICENSE.txt}[https://github.com/SciRuby/sciruby/wiki/License]\nfor details.\n\n== Donations\n\nSupport a SciRuby Fellow:\n\n{<img src=http://pledgie.com/campaigns/15783.png?skin_name=chrome>}[http://www.pledgie.com/campaigns/15783]\n"
  },
  {
    "path": "Rakefile",
    "content": "# -*- ruby -*-\n\nrequire 'rubygems'\nrequire 'rubygems/package_task'\nrequire 'bundler'\n\n#Specify plugins to build on the command line like:\n#rake whatever nmatrix_plugins=atlas,lapacke\n#or\n#rake whatever nmatrix_plugins=all\n#If you want to build *only* plugins and not the core nmatrix gem:\n#rake whatever nmatrix_plugins=all nmatrix_core=false\nif ENV[\"nmatrix_plugins\"] == \"all\"\n  gemspecs = Dir[\"*.gemspec\"]\nelse\n  plugins = []\n  plugins = ENV[\"nmatrix_plugins\"].split(\",\") if ENV[\"nmatrix_plugins\"]\n  gemspecs = [\"nmatrix.gemspec\"] #always include the main nmatrix gem\n  plugins.each do |plugin|\n    gemspecs << \"nmatrix-#{plugin}.gemspec\"\n  end\nend\nif ENV[\"nmatrix_core\"] == \"false\"\n  gemspecs -= [\"nmatrix.gemspec\"]\nend\ngemspecs.map! { |gemspec| eval(IO.read(gemspec)) }\n\nbegin\n  Bundler.setup(:default, :development)\nrescue Bundler::BundlerError => e\n  $stderr.puts e.message\n  $stderr.puts \"Run `bundle install` to install missing gems\"\n  exit e.status_code\nend\n\ndesc \"Build and install into system gems.\"\ntask :install => :repackage do\n  gemspecs.each do |gemspec|\n    gem_file = \"pkg/#{gemspec.name}-#{gemspec.version}.gem\"\n    system \"gem install '#{gem_file}'\"\n  end\nend\n\nrequire 'rake'\nrequire \"rake/extensiontask\"\n\ngemspecs.each do |gemspec|\n  next unless gemspec.extensions\n  gemspec.extensions.each do |extconf|\n    ext_name = extconf.match(/ext\\/(.*)\\/extconf\\.rb/)[1]\n    Rake::ExtensionTask.new do |ext|\n      ext.name = ext_name\n      ext.ext_dir = \"ext/#{ext_name}\"\n      ext.lib_dir = 'lib/'\n      ext.source_pattern = \"**/*.{c,cpp,h}\"\n    end\n  end\nend\n\ngemspecs.each do |gemspec|\n  Gem::PackageTask.new(gemspec).define\nend\n\nrequire 'rspec/core/rake_task'\nrequire 'rspec/core'\nnamespace :spec do\n  #We need a separate rake task for each plugin, rather than one big task that\n  #runs all of the specs. 
This is because there's no way to tell rspec\n  #to run the specs in a certain order with (say) \"nmatrix/atlas\" require'd\n  #for some of the specs, but not for others, without splitting them up like\n  #this.\n  spec_tasks = []\n  gemspecs.each do |gemspec|\n    test_files = gemspec.test_files\n    test_files.keep_if { |file| file =~ /_spec\\.rb$/ }\n    test_files -= ['spec/nmatrix_yale_spec.rb', 'spec/blas_spec.rb', 'spec/lapack_core_spec.rb'] if /java/ === RUBY_PLATFORM\n    next if test_files.empty?\n    spec_tasks << gemspec.name\n    RSpec::Core::RakeTask.new(gemspec.name) do |spec|\n      spec.pattern = FileList.new(test_files)\n    end\n  end\n  task :all => spec_tasks\nend\n\n\n\ntask :spec => \"spec:all\"\n\nBASEDIR = Pathname( __FILE__ ).dirname.relative_path_from( Pathname.pwd )\nSPECDIR = BASEDIR + 'spec'\n\nVALGRIND_OPTIONS = [\n    \"--tool=memcheck\",\n    #\"--leak-check=yes\",\n    \"--num-callers=15\",\n    #\"--error-limit=no\",\n    \"--partial-loads-ok=yes\",\n    \"--undef-value-errors=no\" #,\n    #\"--dsymutil=yes\"\n]\n\nCALLGRIND_OPTIONS = [\n    \"--tool=callgrind\",\n    \"--dump-instr=yes\",\n    \"--simulate-cache=yes\",\n    \"--collect-jumps=yes\"\n]\n\nVALGRIND_MEMORYFILL_OPTIONS = [\n    \"--freelist-vol=100000000\",\n    \"--malloc-fill=6D\",\n    \"--free-fill=66 \",\n]\n\nGDB_OPTIONS = []\n\n\ntask :console do |task|\n  cmd = [ 'irb', \"-r './lib/nmatrix.rb'\" ]\n  run *cmd\nend\n\ntask :pry do |task|\n  cmd = [ 'pry', \"-r './lib/nmatrix.rb'\" ]\n  run *cmd\nend\n\nnamespace :pry do\n  task :valgrind => [ :compile ] do |task|\n    cmd  = [ 'valgrind' ] + VALGRIND_OPTIONS\n    cmd += ['ruby', '-Ilib:ext', \"-r './lib/nmatrix.rb'\", \"-r 'pry'\", \"-e 'binding.pry'\"]\n    run *cmd\n  end\nend\n\nnamespace :console do\n  CONSOLE_CMD = ['irb', \"-r './lib/nmatrix.rb'\"]\n  desc \"Run console under GDB.\"\n  task :gdb => [ :compile ] do |task|\n          cmd = [ 'gdb' ] + GDB_OPTIONS\n          cmd += [ '--args' ]\n          
cmd += CONSOLE_CMD\n          run( *cmd )\n  end\n\n  desc \"Run console under Valgrind.\"\n  task :valgrind => [ :compile ] do |task|\n          cmd = [ 'valgrind' ] + VALGRIND_OPTIONS\n          cmd += CONSOLE_CMD\n          run( *cmd )\n  end\nend\n\ntask :default => :spec\n\ndef run *cmd\n  sh(cmd.join(\" \"))\nend\n\nnamespace :spec do\n  # partial-loads-ok and undef-value-errors necessary to ignore\n  # spurious (and eminently ignorable) warnings from the ruby\n  # interpreter\n\n  RSPEC_CMD = [ 'ruby', '-S', 'rspec', '-Ilib:ext', SPECDIR.to_s ]\n\n  #desc \"Run the spec for generator.rb\"\n  #task :generator do |task|\n  #  run 'rspec spec/generator_spec.rb'\n  #end\n\n  desc \"Run specs under GDB.\"\n  task :gdb => [ :compile ] do |task|\n          cmd = [ 'gdb' ] + GDB_OPTIONS\n    cmd += [ '--args' ]\n    cmd += RSPEC_CMD\n    run( *cmd )\n  end\n\n  desc \"Run specs under cgdb.\"\n  task :cgdb => [ :compile ] do |task|\n    cmd = [ 'cgdb' ] + GDB_OPTIONS\n    cmd += [ '--args' ]\n    cmd += RSPEC_CMD\n    run( *cmd )\n  end\n\n  desc \"Run specs under Valgrind.\"\n  task :valgrind => [ :compile ] do |task|\n    cmd = [ 'valgrind' ] + VALGRIND_OPTIONS\n    cmd += RSPEC_CMD\n    run( *cmd )\n  end\n\n  desc \"Run specs under Callgrind.\"\n  task :callgrind => [ :compile ] do |task|\n    cmd = [ 'valgrind' ] + CALLGRIND_OPTIONS\n    cmd += RSPEC_CMD\n    run( *cmd )\n  end\n\nend\n\n\nLEAKCHECK_CMD = [ 'ruby', '-Ilib:ext', \"#{SPECDIR}/leakcheck.rb\" ]\n\n\ndesc \"Run leakcheck script.\"\ntask :leakcheck => [ :compile ] do |task|\n  cmd = [ 'valgrind' ] + VALGRIND_OPTIONS\n  cmd += LEAKCHECK_CMD\n  run( *cmd )\nend\n\nnamespace :clean do\n  #the generated Makefile doesn't have a soclean target, should this be removed?\n  task :so do |task|\n    gemspecs.each do |gemspec|\n      next unless gemspec.extensions\n      gemspec.extensions.each do |extconf|\n        ext_name = extconf.match(/ext\\/(.*)\\/extconf\\.rb/)[1]\n        tmp_path = 
\"tmp/#{RUBY_PLATFORM}/#{ext_name}/#{RUBY_VERSION}\"\n        chdir tmp_path do\n          if RUBY_PLATFORM =~ /mswin/\n            `nmake soclean`\n          else\n            mkcmd = ENV['MAKE'] || %w[gmake make].find { |c| system(\"#{c} -v >> /dev/null 2>&1\") }\n            `#{mkcmd} soclean`\n          end\n        end\n      end\n    end\n  end\nend\n\n\ndesc \"Check the manifest for correctness\"\ntask :check_manifest do |task|\n  manifest_files  = File.read(\"Manifest.txt\").split\n\n  git_files       = `git ls-files |grep -v 'spec/'`.split\n  ignore_files    = %w{.gitignore .rspec ext/nmatrix/binary_format.txt scripts/ttable_helper.rb}\n\n  possible_files  = git_files - ignore_files\n\n  missing_files   = possible_files - manifest_files\n  extra_files     = manifest_files - possible_files\n\n  unless missing_files.empty?\n    STDERR.puts \"The following files are in the git repo but not the Manifest:\"\n    missing_files.each { |f| STDERR.puts \" -- #{f}\"}\n  end\n\n  unless extra_files.empty?\n    STDERR.puts \"The following files are in the Manifest but may not be necessary:\"\n    extra_files.each { |f| STDERR.puts \" -- #{f}\"}\n  end\n\n  if extra_files.empty? 
&& missing_files.empty?\n    STDERR.puts \"Manifest looks good!\"\n  end\n\nend\n\nrequire \"rdoc/task\"\n#separate out docs for plugins?\nRDoc::Task.new do |rdoc|\n  rdoc.main = \"README.rdoc\"\n  rdoc.rdoc_files.include(%w{README.rdoc History.txt LICENSE.txt CONTRIBUTING.md lib ext})\n  rdoc.options << \"--exclude=ext/nmatrix/extconf.rb\"\n  rdoc.options << \"--exclude=ext/nmatrix_atlas/extconf.rb\"\n  rdoc.options << \"--exclude=ext/nmatrix/ttable_helper.rb\"\n  rdoc.options << \"--exclude=lib/nmatrix/rspec.rb\"\nend\n\n# jruby tasks\n\nnamespace :jruby do\n\n  PROJECT_DIR = File.expand_path(\".\",Dir.pwd)\n\n  BUILD_DIR = \"build\"\n  CLASSES_DIR = \"../build/classes\"\n  TEST_CLASSES_DIR = \"build/testClasses\"\n\n  JRUBY_DIR = \"#{PROJECT_DIR}/ext/nmatrix_java\"\n  VENDOR_DIR = \"#{JRUBY_DIR}/vendor\"\n  TARGET_DIR = \"#{JRUBY_DIR}/target\"\n\n  jars = Dir[\"#{VENDOR_DIR}/*.jar\"]\n\n  desc 'Compile java classes'\n  task :javac do\n    unless RUBY_PLATFORM == 'java'\n      abort 'Please run with JRuby'\n    end\n    sh \"mkdir -p #{JRUBY_DIR}/build/classes\"\n    Dir.chdir(\"#{JRUBY_DIR}/nmatrix\")\n    classes    = Dir['**/*.java']\n    sh \"javac -classpath #{jars.join(':')} -d #{CLASSES_DIR} #{classes.join(' ')}\"\n  end\n\n  desc 'Package java classes in a jar file'\n  task :jar do\n    unless RUBY_PLATFORM == 'java'\n      abort 'Please run with JRuby'\n    end\n    sh \"mkdir -p #{TARGET_DIR}\"\n    Dir.chdir(\"#{JRUBY_DIR}/build/classes\")\n    classes = Dir['**/*.class']\n    sh \"jar -cf #{TARGET_DIR}/nmatrix.jar #{classes.join(' ')}\"\n  end\n\n  task :all => [:javac, :jar]\nend\n\ndesc \"Compile java classes and Package them in a jar file\"\ntask :jruby => 'jruby:all'\n\nnamespace :travis do\n  task :env do\n    if /java/ === RUBY_PLATFORM\n      puts \"Building for jruby\"\n      sh \"mkdir ext/nmatrix_java/vendor\"\n      puts \"Downloading tar file.\"\n      sh \"wget 
http://www-eu.apache.org/dist//commons/math/binaries/commons-math3-3.6.1-bin.tar.gz\"\n      puts \"Unzipping tar file.\"\n      sh \"tar -zxf commons-math3-3.6.1-bin.tar.gz\"\n      puts \"Deleting tar file.\"\n      sh \"rm commons-math3-3.6.1-bin.tar.gz\"\n      sh \"cp -r commons-math3-3.6.1/commons-math3-3.6.1.jar ext/nmatrix_java/vendor\"\n    else\n      puts \"\\n# Build environment:\"\n      %w[\n        CC CXX\n        USE_ATLAS USE_OPENBLAS USE_REF NO_EXTERNAL_LIB\n        TRAVIS_OS_NAME TRAVIS_BRANCH TRAVIS_COMMIT TRAVIS_PULL_REQUEST\n      ].each do |name|\n        puts \"- #{name}: #{ENV[name]}\"\n      end\n\n      require 'rbconfig'\n      puts \"\\n# RbConfig::MAKEFILE_CONFIG values:\"\n      %w[\n        CC CXX CPPFLAGS CFLAGS CXXFLAGS\n      ].each do |name|\n        puts \"- #{name}: #{RbConfig::MAKEFILE_CONFIG[name]}\"\n      end\n\n      cc = RbConfig::MAKEFILE_CONFIG['CC']\n      puts \"\\n$ #{cc} -v\\n#{`#{cc} -v 2>&1`}\"\n    end\n  end\nend\n\n# vim: syntax=ruby\n"
  },
  {
    "path": "ext/nmatrix/binary_format.txt",
    "content": "This is the proposed binary format for saving and loading NMatrix objects.\n\nOrder is little-endian.\n\nList matrices should be converted to dense or yale matrices. There should be no serious need to load or save\nlinked-list matrices, since these exist primarily in order to construct efficient yale matrices.\n\n\nFirst 64-bit block:\n* ui16 major (version)\n* ui16 minor\n* ui16 release\n* i16 NULL\n\n\nSecond 64-bit block:\n* ui8 dtype\n* ui8 stype\n* ui8 itype # ui32 for dense\n* ui8 symm\n* i16 NULL\n* ui16 dim    # if 1, NVector; otherwise, NMatrix\n\n\n3rd - nth 64-bit block: shape\n\nitype sets the number of bytes allocated for each shape entry. Since only yale uses itype, dense will pretty\nmuch always be the UINT32 itype (see nmatrix.h). If the total number of bytes occupied by the shape array is\nless than 8, the rest of the 64-bit block will be padded with zeros.\n\n\n(n+1)th 64-bit block: depends on stype, symm\n\nsymm is designed to reduce file size by allowing us to not save certain elements in symmetric, hermitian, skew-\nsymmetric, and triangular matrices. These values will be defined in nmatrix.h; 0 indicates standard (no symmetry).\nIn later versions, additional patterns may be defined which might even have less to do with symmetry than\nupper/lower do.\n\nWhen storing a symmetric matrix, we will only store the upper portion. If the matrix is lower triangular, only the\nlower portion will be stored.\n\nFor dense, we simply store the contents of the matrix exactly as in memory (or just the upper-triangular part if\nsymm is set).\n\nFor yale, we store:\n* ui32 ndnz\n* ui32 length (AKA size, the number of elements in A/IJA that aren't nil/undefined)\n\nThe latter will serve as the capacity when we read a Yale matrix.\n\nThen we store the a array, again padding with zeros so it's a multiple of 8 bytes.\n\nThen we store the ija array, padding with zeros so it's a multiple of 8 bytes.\n"
  },
  {
    "path": "ext/nmatrix/data/complex.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == complex.h\n//\n// Functions and classes for dealing with complex numbers.\n\n#ifndef COMPLEX_H\n#define COMPLEX_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <type_traits>\n#include <iostream>\n#include <cmath>\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n\n/*\n * Macros\n */\n\n/*\n * Types\n */\nnamespace nm {\n\nclass RubyObject;\ntemplate <typename Type> class Complex;\n\ntypedef Complex<float32_t> Complex64;\ntypedef Complex<float64_t> Complex128;\n\n/*\n * Data\n */\n\n/*\n * Classes and Functions\n */\n\ntemplate <typename Type>\nclass Complex {\n  public:\n  // The real and immaginary parts of the complex number.\n  Type r;\n  Type i;\n\n  /*\n   * Default constructor.\n   */\n  inline Complex(Type real = 0, Type imaginary = 0) : r(real), i(imaginary) {}\n\n  /*\n   * Copy constructors.\n   */\n  template <typename ComplexType>\n  explicit inline Complex(const Complex<ComplexType>& other) : r(other.r), i(other.i) {}\n\n  template <typename ComplexType>\n  inline Complex<Type>& operator=(const Complex<ComplexType>& other) {\n    this->r = static_cast<Type>(other.r);\n    this->i = static_cast<Type>(other.i);\n    return *this;\n  }\n\n  explicit 
Complex(const RubyObject& other);\n\n  Complex<Type>& operator=(const RubyObject& other);\n\n  template<typename OtherType>\n  inline Complex<Type>& operator=(const OtherType& real) {\n    this->r = Type(real);\n    this->i = Type(0);\n    return *this;\n  }\n\n  /*\n   * Complex conjugate function -- creates a copy, but inverted.\n   */\n  inline Complex<Type> conjugate() const {\n    return Complex<Type>(this->r, -(this->i));\n  }\n\n  /*\n   * Complex inverse function -- creates a copy, but inverted.\n   *\n   * FIXME: Check that this doesn't duplicate functionality of NativeType / Complex<Type>\n   */\n  inline Complex<Type> inverse() const {\n    Complex<Type> conj = conjugate();\n    Type denom = this->r * this->r + this->i * this->i;\n    return Complex<Type>(conj.r / denom, conj.i / denom);\n  }\n\n  // Negative operator\n  inline Complex<Type> operator-() const {\n    return Complex<Type>(-this->r, -this->i);\n  }\n\n\n\n  /*\n   * Binary operator definitions for various types.\n   */\n\n  ////////////////////////////////\n  // Complex-Complex Operations //\n  ////////////////////////////////\n\n  template <typename OtherType>\n  inline Complex<Type> operator+(const Complex<OtherType>& other) const {\n    return Complex<Type>(this->r + other.r, this->i + other.i);\n  }\n\n  template <typename OtherType>\n  inline Complex<Type>& operator+=(const Complex<OtherType>& other) {\n    this->r += other.r;\n    this->i += other.i;\n    return *this;\n  }\n\n  template <typename OtherType>\n  inline Complex<Type>& operator-=(const Complex<OtherType>& other) {\n    this->r -= other.r;\n    this->i -= other.i;\n    return *this;\n  }\n\n  template <typename OtherType>\n  inline Complex<Type> operator-(const Complex<OtherType>& other) const {\n    return Complex<Type>(this->r - other.r, this->i - other.i);\n  }\n\n  template <typename OtherType>\n  inline Complex<Type> operator*(const Complex<OtherType>& other) const {\n    return Complex<Type>(this->r * other.r - 
this->i * other.i, this->r * other.i + this->i * other.r);\n  }\n\n  template <typename OtherType>\n  inline Complex<Type>& operator*=(const Complex<OtherType>& other) {\n    this->r = this->r * other.r - this->i * other.i;\n    this->i = this->r * other.i + this->i * other.r;\n    return *this;\n  }\n\n  template <typename OtherType>\n  inline Complex<Type> operator/(const Complex<OtherType>& other) const {\n    Type new_r, new_i;\n    Type denom = other.i * other.i + other.r * other.r;\n\n    new_r = (this->r * other.r + this->i * other.i) / denom;\n    new_i = (this->i * other.r - this->r * other.i) / denom;\n\n    return Complex<Type>(new_r, new_i);\n  }\n\n  template <typename OtherType>\n  inline Complex<Type> operator/=(const Complex<OtherType>& other) {\n    Type new_r, new_i;\n    Type denom = other.i * other.i + other.r * other.r;\n\n    new_r = (this->r * other.r + this->i * other.i) / denom;\n    new_i = (this->i * other.r - this->r * other.i) / denom;\n\n    this->r = new_r;\n    this->i = new_i;\n    return *this;\n  }\n\n  template <typename OtherType>\n  inline bool operator<(const Complex<OtherType>& other) const {\n    return (this->r < other.r) || ((this->r <= other.r) && (this->i < other.i));\n  }\n\n  template <typename OtherType>\n  inline bool operator>(const Complex<OtherType>& other) const {\n    return (this->r > other.r) || ((this->r >= other.r) && (this->i > other.i));\n  }\n\n  template <typename OtherType>\n  inline bool operator==(const Complex<OtherType>& other) const {\n    return FP_EQUAL(this->r, other.r) && FP_EQUAL(this->i, other.i);\n  }\n\n  template <typename OtherType>\n  inline bool operator!=(const Complex<OtherType>& other) const {\n    return !(*this == other);\n  }\n\n  template <typename OtherType>\n  inline bool operator<=(const Complex<OtherType>& other) const {\n    return (*this < other) || (*this == other);\n  }\n\n  template <typename OtherType>\n  inline bool operator>=(const Complex<OtherType>& other) const {\n 
   return (*this > other) || (*this == other);\n  }\n\n  template <typename OtherType>\n  inline operator Complex<OtherType> () const {\n    return Complex<OtherType>((OtherType)this->r, (OtherType)this->i);\n  }\n\n  ///////////////////////////////\n  // Complex-Native Operations //\n  ///////////////////////////////\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline Complex<Type> operator+(const NativeType& other) const {\n    return *this + Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline Complex<Type> operator-(const NativeType& other) const {\n    return *this - Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline Complex<Type> operator*(const NativeType& other) const {\n    return *this * Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline Complex<Type> operator/(const NativeType& other) const {\n    return *this / Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator<(const NativeType& other) const {\n    return *this < Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator>(const NativeType& other) const {\n    return *this > Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator==(const NativeType& other) const {\n    return *this == Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename 
std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator!=(const NativeType& other) const {\n    return *this != Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator<=(const NativeType& other) const {\n    return *this <= Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator>=(const NativeType& other) const {\n    return *this >= Complex<Type>(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline operator NativeType () const {\n    return (NativeType)this->r;\n  }\n\n  operator RubyObject () const;\n};\n\n///////////////////////////////\n// Native-Complex Operations //\n///////////////////////////////\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline Complex<ComplexType> operator+(const NativeType& left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) + right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline Complex<ComplexType> operator-(const NativeType& left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) - right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline Complex<ComplexType> operator*(const NativeType& left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) * right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline Complex<ComplexType> operator/(const 
NativeType& left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) / right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator<(const NativeType left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) < right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator>(const NativeType left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) > right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator==(const NativeType left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) == right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator!=(const NativeType left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) != right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator<=(const NativeType left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) <= right;\n}\n\ntemplate <typename NativeType, typename ComplexType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator>=(const NativeType left, const Complex<ComplexType>& right) {\n  return Complex<ComplexType>(left) >= right;\n}\n\ntemplate <typename Type>\ninline std::ostream& operator<<(std::ostream& out, const Complex<Type>& rhs) {\n  out << \"(\" << rhs.r << \",\" << rhs.i << \"i)\" << std::flush;\n  return out;\n}\n\n// Negative operator\ntemplate <typename IntType, typename = typename 
std::enable_if<std::is_integral<IntType>::value>::type>\ninline Complex<IntType> operator-(const Complex<IntType>& rhs) {\n  return Complex<IntType>(-rhs.r, -rhs.i);\n}\n\n} // end of namespace nm\n\nnamespace std {\n  template <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  nm::Complex<FloatType> piecewise_abs(const nm::Complex<FloatType>& value) {\n    return nm::Complex<FloatType>(value.r < 0 ? -value.r : value.r,\n                                  value.i < 0 ? -value.i : value.i);\n  }\n\n  template <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  nm::Complex<FloatType> real_abs(const nm::Complex<FloatType>& value) {\n    return nm::Complex<FloatType>(value.r < 0 ? -value.r : value.r,\n                                  value.i);\n  }\n\n  template <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  nm::Complex<FloatType> imag_abs(const nm::Complex<FloatType>& value) {\n    return nm::Complex<FloatType>(value.r,\n                                  value.i < 0 ? -value.i : value.i);\n  }\n\n  template <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  double abs(const nm::Complex<FloatType>& value) {\n    return std::sqrt(double(value.r)*double(value.r) + double(value.i)*double(value.i));\n  }\n}\n\n#endif // COMPLEX_H\n"
  },
  {
    "path": "ext/nmatrix/data/data.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == data.cpp\n//\n// Functions and data for dealing the data types.\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <stdexcept>\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n#include \"data.h\"\n\n/*\n * Global Variables\n */\n\nnamespace nm {\n  const char* const EWOP_OPS[nm::NUM_EWOPS] = {\n    \"+\",\n    \"-\",\n    \"*\",\n    \"/\",\n    \"**\",\n    \"%\",\n    \"==\",\n    \"!=\",\n    \"<\",\n    \">\",\n    \"<=\",\n    \">=\"\n  };\n\n  const std::string EWOP_NAMES[nm::NUM_EWOPS] = {\n    \"add\",\n    \"sub\",\n    \"mul\",\n    \"div\",\n    \"pow\",\n    \"mod\",\n    \"eqeq\",\n    \"neq\",\n    \"lt\",\n    \"gt\",\n    \"leq\",\n    \"geq\"\n  };\n\n  const std::string NONCOM_EWOP_NAMES[nm::NUM_NONCOM_EWOPS] = {\n    \"atan2\",\n    \"ldexp\",\n    \"hypot\"\n  };\n\n  const std::string UNARYOPS[nm::NUM_UNARYOPS] = {\n    \"sin\", \"cos\", \"tan\",\n    \"asin\", \"acos\", \"atan\",\n    \"sinh\", \"cosh\", \"tanh\",\n    \"asinh\", \"acosh\", \"atanh\",\n    \"exp\", \"log2\", \n    \"log10\", \"sqrt\", \"erf\", \n    \"erfc\", \"cbrt\", \"gamma\",\n    \"negate\", \"floor\", \"ceil\", \"round\"\n  };\n\n\n  /*\n   * Create a RubyObject from a regular C 
value (given a dtype). Does not return a VALUE! To get a VALUE, you need to\n   * look at the rval property of what this function returns.\n   */\n  nm::RubyObject rubyobj_from_cval(void* val, nm::dtype_t dtype) {\n    using namespace nm;\n    switch (dtype) {\n      case BYTE:\n        return RubyObject(*reinterpret_cast<uint8_t*>(val));\n\n      case INT8:\n        return RubyObject(*reinterpret_cast<int8_t*>(val));\n\n      case INT16:\n        return RubyObject(*reinterpret_cast<int16_t*>(val));\n\n      case INT32:\n        return RubyObject(*reinterpret_cast<int32_t*>(val));\n\n      case INT64:\n        return RubyObject(*reinterpret_cast<int64_t*>(val));\n\n      case FLOAT32:\n        return RubyObject(*reinterpret_cast<float32_t*>(val));\n\n      case FLOAT64:\n        return RubyObject(*reinterpret_cast<float64_t*>(val));\n\n      case COMPLEX64:\n        return RubyObject(*reinterpret_cast<Complex64*>(val));\n\n      case COMPLEX128:\n        return RubyObject(*reinterpret_cast<Complex128*>(val));\n\n      default:\n        try {\n          throw std::logic_error(\"Cannot create ruby object\");\n        }\n        catch (std::logic_error err) {\n          printf(\"%s\\n\", err.what());\n        }\n\n        rb_raise(nm_eDataTypeError, \"Conversion to RubyObject requested from unknown/invalid data type (did you try to convert from a VALUE?)\");\n    }\n    return Qnil;\n  }\n} // end of namespace nm\n\nextern \"C\" {\n\nconst char* const DTYPE_NAMES[nm::NUM_DTYPES] = {\n  \"byte\",\n  \"int8\",\n  \"int16\",\n  \"int32\",\n  \"int64\",\n  \"float32\",\n  \"float64\",\n  \"complex64\",\n  \"complex128\",\n  \"object\"\n};\n\n\nconst size_t DTYPE_SIZES[nm::NUM_DTYPES] = {\n  sizeof(uint8_t),\n  sizeof(int8_t),\n  sizeof(int16_t),\n  sizeof(int32_t),\n  sizeof(int64_t),\n  sizeof(float32_t),\n  sizeof(float64_t),\n  sizeof(nm::Complex64),\n  sizeof(nm::Complex128),\n  sizeof(nm::RubyObject)\n};\n\n\nconst nm::dtype_t Upcast[nm::NUM_DTYPES][nm::NUM_DTYPES] = 
{\n  { nm::BYTE, nm::INT16, nm::INT16, nm::INT32, nm::INT64, nm::FLOAT32, nm::FLOAT64, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::INT16, nm::INT8, nm::INT16, nm::INT32, nm::INT64, nm::FLOAT32, nm::FLOAT64, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::INT16, nm::INT16, nm::INT16, nm::INT32, nm::INT64, nm::FLOAT32, nm::FLOAT64, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::INT32, nm::INT32, nm::INT32, nm::INT32, nm::INT64, nm::FLOAT32, nm::FLOAT64, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::INT64, nm::INT64, nm::INT64, nm::INT64, nm::INT64, nm::FLOAT32, nm::FLOAT64, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::FLOAT32, nm::FLOAT32, nm::FLOAT32, nm::FLOAT32, nm::FLOAT32, nm::FLOAT32, nm::FLOAT64, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::FLOAT64, nm::FLOAT64, nm::FLOAT64, nm::FLOAT64, nm::FLOAT64, nm::FLOAT64, nm::FLOAT64, nm::COMPLEX128, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::COMPLEX64, nm::COMPLEX64, nm::COMPLEX64, nm::COMPLEX64, nm::COMPLEX64, nm::COMPLEX64, nm::COMPLEX128, nm::COMPLEX64, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::COMPLEX128, nm::RUBYOBJ},\n  { nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ, nm::RUBYOBJ}\n};\n\n\n/*\n * Forward Declarations\n */\n\n/*\n * Functions\n */\n\n/*\n * Converts a RubyObject\n */\nvoid rubyval_to_cval(VALUE val, nm::dtype_t dtype, void* loc) {\n  using namespace nm;\n  switch (dtype) {\n    case nm::BYTE:\n      *reinterpret_cast<uint8_t*>(loc)      = static_cast<uint8_t>(RubyObject(val));\n      break;\n\n    case nm::INT8:\n      *reinterpret_cast<int8_t*>(loc)        = static_cast<int8_t>(RubyObject(val));\n      break;\n\n    case nm::INT16:\n      *reinterpret_cast<int16_t*>(loc)      = static_cast<int16_t>(RubyObject(val));\n      break;\n\n    case nm::INT32:\n      
*reinterpret_cast<int32_t*>(loc)      = static_cast<int32_t>(RubyObject(val));\n      break;\n\n    case nm::INT64:\n      *reinterpret_cast<int64_t*>(loc)      = static_cast<int64_t>(RubyObject(val));\n      break;\n\n    case nm::FLOAT32:\n      *reinterpret_cast<float32_t*>(loc)    = static_cast<float32_t>(RubyObject(val));\n      break;\n\n    case nm::FLOAT64:\n      *reinterpret_cast<float64_t*>(loc)    = static_cast<float64_t>(RubyObject(val));\n      break;\n\n    case nm::COMPLEX64:\n      *reinterpret_cast<Complex64*>(loc)    = RubyObject(val).to<Complex64>();\n      break;\n\n    case nm::COMPLEX128:\n      *reinterpret_cast<Complex128*>(loc)    = RubyObject(val).to<Complex128>();\n      break;\n\n    case RUBYOBJ:\n      *reinterpret_cast<VALUE*>(loc)        = val;\n      //rb_raise(rb_eTypeError, \"Attempting a bad conversion from a Ruby value.\");\n      break;\n\n    default:\n      rb_raise(rb_eTypeError, \"Attempting a bad conversion.\");\n      break;\n  }\n}\n\n\n\n\n/*\n * Allocate and return a piece of data of the correct dtype, converted from a\n * given RubyObject.\n */\nvoid* rubyobj_to_cval(VALUE val, nm::dtype_t dtype) {\n  size_t size =  DTYPE_SIZES[dtype];\n  NM_CONSERVATIVE(nm_register_value(&val));\n  void* ret_val = NM_ALLOC_N(char, size);\n\n  rubyval_to_cval(val, dtype, ret_val);\n  NM_CONSERVATIVE(nm_unregister_value(&val));\n  return ret_val;\n}\n\n\nvoid nm_init_data() {\n  volatile VALUE t = INT2FIX(1);\n  volatile nm::RubyObject obj(t);\n  volatile nm::Complex64 a(const_cast<nm::RubyObject&>(obj));\n  volatile nm::Complex128 b(const_cast<nm::RubyObject&>(obj));\n}\n\n\n} // end of extern \"C\" block\n"
  },
  {
    "path": "ext/nmatrix/data/data.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == data.h\n//\n// Header file for dealing with data types.\n\n#ifndef DATA_H\n#define DATA_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <string>\n\n/*\n * Project Includes\n */\n\n#include \"nmatrix.h\"\n\n#include \"types.h\"\n\n#include \"complex.h\"\n#include \"ruby_object.h\"\n\nnamespace nm {\n\n\n  /*\n   * Constants\n   */\n\n  const int NUM_DTYPES = 10;\n  const int NUM_ITYPES = 4;\n  const int NUM_EWOPS = 12;\n  const int NUM_UNARYOPS = 24;\n  const int NUM_NONCOM_EWOPS = 3;\n\n  enum ewop_t {\n    EW_ADD,\n    EW_SUB,\n    EW_MUL,\n    EW_DIV,\n    EW_POW,\n    EW_MOD,\n    EW_EQEQ,\n    EW_NEQ,\n    EW_LT,\n    EW_GT,\n    EW_LEQ,\n    EW_GEQ,\n  };\n\n  enum noncom_ewop_t {\n    NONCOM_EW_ATAN2,\n    NONCOM_EW_LDEXP,\n    NONCOM_EW_HYPOT\n  };\n\n  enum unaryop_t {\n    UNARY_SIN,\n    UNARY_COS,\n    UNARY_TAN,\n    UNARY_ASIN,\n    UNARY_ACOS,\n    UNARY_ATAN,\n    UNARY_SINH,\n    UNARY_COSH,\n    UNARY_TANH,\n    UNARY_ASINH,\n    UNARY_ACOSH,\n    UNARY_ATANH,\n    UNARY_EXP,\n    UNARY_LOG2,\n    UNARY_LOG10,\n    UNARY_SQRT,\n    UNARY_ERF,\n    UNARY_ERFC,\n    UNARY_CBRT,\n    UNARY_GAMMA,\n    UNARY_NEGATE,\n    UNARY_FLOOR,\n    UNARY_CEIL,\n    
UNARY_ROUND\n  };\n\n  // element-wise and scalar operators\n  extern const char* const  EWOP_OPS[nm::NUM_EWOPS];\n  extern const std::string  EWOP_NAMES[nm::NUM_EWOPS];\n  extern const std::string  UNARYOPS[nm::NUM_UNARYOPS];\n  extern const std::string  NONCOM_EWOP_NAMES[nm::NUM_NONCOM_EWOPS];\n\n\n  template <typename Type>\n  Complex<Type>::Complex(const RubyObject& other) {\n    *this = other;\n  }\n\n  template <typename Type>\n  Complex<Type>& Complex<Type>::operator=(const RubyObject& other) {\n    if (RB_TYPE_P(other.rval, T_COMPLEX)) {\n      this->r = NUM2DBL(rb_funcall(other.rval, rb_intern(\"real\"), 0));\n      this->i = NUM2DBL(rb_funcall(other.rval, rb_intern(\"imag\"), 0));\n    }\n    else if (RB_TYPE_P(other.rval, T_FLOAT) ||\n             RB_TYPE_P(other.rval, T_FIXNUM) ||\n             RB_TYPE_P(other.rval, T_BIGNUM)) {\n      this->r = NUM2DBL(other.rval);\n      this->i = 0.0;\n    }\n    else {\n      rb_raise(rb_eTypeError, \"not sure how to convert this type of VALUE to a complex\");\n    }\n    return *this;\n  }\n\n  template<typename Type>\n  Complex<Type>::operator RubyObject () const {\n    return RubyObject(*this);\n  }\n\n  nm::RubyObject  rubyobj_from_cval(void* val, nm::dtype_t dtype);\n} // end of namespace nm\n\n/*\n * Macros\n */\n\n#define STYPE_MARK_TABLE(name)                  \\\n  static void (*(name)[nm::NUM_STYPES])(STORAGE*) = {  \\\n    nm_dense_storage_mark,                      \\\n    nm_list_storage_mark,                        \\\n    nm_yale_storage_mark                        \\\n  };\n\n#define STYPE_REGISTER_TABLE(name)              \\\n  static void (*(name)[nm::NUM_STYPES])(const STORAGE*) = { \\\n    nm_dense_storage_register,                  \\\n    nm_list_storage_register,                   \\\n    nm_yale_storage_register                    \\\n  };\n\n#define STYPE_UNREGISTER_TABLE(name)              \\\n  static void (*(name)[nm::NUM_STYPES])(const STORAGE*) = { \\\n    nm_dense_storage_unregister,   
             \\\n    nm_list_storage_unregister,                 \\\n    nm_yale_storage_unregister                  \\\n  };\n\n#define CAST_TABLE(name)                                                   \\\n  static STORAGE* (*(name)[nm::NUM_STYPES][nm::NUM_STYPES])(const STORAGE*, nm::dtype_t, void*) = {      \\\n    { nm_dense_storage_cast_copy,  nm_dense_storage_from_list,  nm_dense_storage_from_yale },  \\\n    { nm_list_storage_from_dense,  nm_list_storage_cast_copy,   nm_list_storage_from_yale  },  \\\n    { nm_yale_storage_from_dense,  nm_yale_storage_from_list,   nm_yale_storage_cast_copy  }   \\\n  };\n\n/*\n * Defines a static array that hold function pointers to dtype templated\n * versions of the specified function.\n */\n#define DTYPE_TEMPLATE_TABLE(fun, ret, ...) NAMED_DTYPE_TEMPLATE_TABLE(ttable, fun, ret, __VA_ARGS__)\n\n#define NAMED_DTYPE_TEMPLATE_TABLE(name, fun, ret, ...) \\\n  static ret (*(name)[nm::NUM_DTYPES])(__VA_ARGS__) =  { \\\n    fun<uint8_t>,                                        \\\n    fun<int8_t>,                                        \\\n    fun<int16_t>,                                        \\\n    fun<int32_t>,                                        \\\n    fun<int64_t>,                                        \\\n    fun<float32_t>,                                      \\\n    fun<float64_t>,                                      \\\n    fun<nm::Complex64>,                                  \\\n    fun<nm::Complex128>,                                \\\n    fun<nm::RubyObject>                                 \\\n  };\n\n#define DTYPE_OBJECT_STATIC_TABLE(obj, fun, ret, ...)     
\\\n  static ret (*(ttable)[nm::NUM_DTYPES])(__VA_ARGS__) =  { \\\n    obj<uint8_t>::fun,                                  \\\n    obj<int8_t>::fun,                                    \\\n    obj<int16_t>::fun,                                  \\\n    obj<int32_t>::fun,                                  \\\n    obj<int64_t>::fun,                                  \\\n    obj<float32_t>::fun,                                \\\n    obj<float64_t>::fun,                                \\\n    obj<nm::Complex64>::fun,                            \\\n    obj<nm::Complex128>::fun,                            \\\n    obj<nm::RubyObject>::fun                            \\\n  };\n\n#define NAMED_DTYPE_TEMPLATE_TABLE_NO_ROBJ(name, fun, ret, ...) \\\n  static ret (*(name)[nm::NUM_DTYPES])(__VA_ARGS__) =  {      \\\n    fun<uint8_t>,                                        \\\n    fun<int8_t>,                                        \\\n    fun<int16_t>,                                        \\\n    fun<int32_t>,                                        \\\n    fun<int64_t>,                                        \\\n    fun<float32_t>,                                      \\\n    fun<float64_t>,                                      \\\n    fun<nm::Complex64>,                                  \\\n    fun<nm::Complex128>                                \\\n  };\n\n\n/*\n * Same as DTYPE_TEMPLATE_TABLE but for functions that have two template\n * parameters.\n *\n * The left-hand DType is used as the first index, and the right-hand side is\n * the second index.  Not all left- and right-hand side combinations are valid,\n * and an invalid combination will result in a NULL pointer.\n */\n#define LR_DTYPE_TEMPLATE_TABLE(fun, ret, ...) NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, fun, ret, __VA_ARGS__)\n\n#define NAMED_LR_DTYPE_TEMPLATE_TABLE(name, fun, ret, ...)                                                                                                                
\\\n  static ret (*(name)[nm::NUM_DTYPES][nm::NUM_DTYPES])(__VA_ARGS__) = {  \\\n    {fun<uint8_t, uint8_t>, fun<uint8_t, int8_t>, fun<uint8_t, int16_t>, fun<uint8_t, int32_t>, fun<uint8_t, int64_t>, fun<uint8_t, float32_t>, fun<uint8_t, float64_t>, fun<uint8_t, nm::Complex64>, fun<uint8_t, nm::Complex128>, fun<uint8_t, nm::RubyObject>}, \\\n    {fun<int8_t, uint8_t>, fun<int8_t, int8_t>, fun<int8_t, int16_t>, fun<int8_t, int32_t>, fun<int8_t, int64_t>, fun<int8_t, float32_t>, fun<int8_t, float64_t>, fun<int8_t, nm::Complex64>, fun<int8_t, nm::Complex128>, fun<int8_t, nm::RubyObject>},               \\\n    {fun<int16_t, uint8_t>, fun<int16_t, int8_t>, fun<int16_t, int16_t>, fun<int16_t, int32_t>, fun<int16_t, int64_t>, fun<int16_t, float32_t>, fun<int16_t, float64_t>, fun<int16_t, nm::Complex64>, fun<int16_t, nm::Complex128>, fun<int16_t, nm::RubyObject>},  \\\n    {fun<int32_t, uint8_t>, fun<int32_t, int8_t>, fun<int32_t, int16_t>, fun<int32_t, int32_t>, fun<int32_t, int64_t>, fun<int32_t, float32_t>, fun<int32_t, float64_t>, fun<int32_t, nm::Complex64>, fun<int32_t, nm::Complex128>, fun<int32_t, nm::RubyObject>},  \\\n    {fun<int64_t, uint8_t>, fun<int64_t, int8_t>, fun<int64_t, int16_t>, fun<int64_t, int32_t>, fun<int64_t, int64_t>, fun<int64_t, float32_t>, fun<int64_t, float64_t>, fun<int64_t, nm::Complex64>, fun<int64_t, nm::Complex128>, fun<int64_t, nm::RubyObject>},  \\\n    {fun<float32_t, uint8_t>, fun<float32_t, int8_t>, fun<float32_t, int16_t>, fun<float32_t, int32_t>, fun<float32_t, int64_t>, fun<float32_t, float32_t>, fun<float32_t, float64_t>, fun<float32_t, nm::Complex64>, fun<float32_t, nm::Complex128>, fun<float32_t, nm::RubyObject>},  \\\n    {fun<float64_t, uint8_t>, fun<float64_t, int8_t>, fun<float64_t, int16_t>, fun<float64_t, int32_t>, fun<float64_t, int64_t>, fun<float64_t, float32_t>, fun<float64_t, float64_t>, fun<float64_t, nm::Complex64>, fun<float64_t, nm::Complex128>, fun<float64_t, nm::RubyObject>},  \\\n    {fun<nm::Complex64, 
uint8_t>, fun<nm::Complex64, int8_t>, fun<nm::Complex64, int16_t>, fun<nm::Complex64, int32_t>, fun<nm::Complex64, int64_t>, fun<nm::Complex64, float32_t>, fun<nm::Complex64, float64_t>, fun<nm::Complex64, nm::Complex64>, fun<nm::Complex64, nm::Complex128>, fun<nm::Complex64, nm::RubyObject>},               \\\n    {fun<nm::Complex128, uint8_t>, fun<nm::Complex128, int8_t>, fun<nm::Complex128, int16_t>, fun<nm::Complex128, int32_t>, fun<nm::Complex128, int64_t>, fun<nm::Complex128, float32_t>, fun<nm::Complex128, float64_t>, fun<nm::Complex128, nm::Complex64>, fun<nm::Complex128, nm::Complex128>, fun<nm::Complex128, nm::RubyObject>},  \\\n    {fun<nm::RubyObject, uint8_t>, fun<nm::RubyObject, int8_t>, fun<nm::RubyObject, int16_t>, fun<nm::RubyObject, int32_t>, fun<nm::RubyObject, int64_t>, fun<nm::RubyObject, float32_t>, fun<nm::RubyObject, float64_t>, fun<nm::RubyObject, nm::Complex64>, fun<nm::RubyObject, nm::Complex128>, fun<nm::RubyObject, nm::RubyObject>}   \\\n  };\n\n/*\n * Defines a static array that holds function pointers to operation, and left-\n * and right-side dtype templated version sof the specified function.\n */\n#define OP_LR_DTYPE_TEMPLATE_TABLE(fun, ret, ...) NAMED_OP_LR_DTYPE_TEMPLATE_TABLE(ttable, fun, ret, __VA_ARGS__)\n\n#define NAMED_OP_LR_DTYPE_TEMPLATE_TABLE(name, fun, ret, ...)                                                                                                               
\\\n  static ret (*(name)[nm::NUM_EWOPS][nm::NUM_DTYPES][nm::NUM_DTYPES])(__VA_ARGS__) = {                                                                                                  \\\n    {                                                                                                                                                                        \\\n      {fun<nm::EW_ADD, uint8_t, uint8_t>, fun<nm::EW_ADD, uint8_t, int8_t>, fun<nm::EW_ADD, uint8_t, int16_t>, fun<nm::EW_ADD, uint8_t, int32_t>, fun<nm::EW_ADD, uint8_t, int64_t>,            \\\n        fun<nm::EW_ADD, uint8_t, float32_t>, fun<nm::EW_ADD, uint8_t, float64_t>, fun<nm::EW_ADD, uint8_t, nm::Complex64>, fun<nm::EW_ADD, uint8_t, nm::Complex128>,                        \\\n        fun<nm::EW_ADD, int8_t, float32_t>, fun<nm::EW_ADD, int8_t, float64_t>, fun<nm::EW_ADD, int8_t, nm::Complex64>, fun<nm::EW_ADD, int8_t, nm::Complex128>,                            \\\n         NULL},                                              \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, int16_t, uint8_t>, fun<nm::EW_ADD, int16_t, int8_t>, fun<nm::EW_ADD, int16_t, int16_t>, fun<nm::EW_ADD, int16_t, int32_t>, fun<nm::EW_ADD, int16_t, int64_t>,            \\\n        fun<nm::EW_ADD, int16_t, float32_t>, fun<nm::EW_ADD, int16_t, float64_t>, fun<nm::EW_ADD, int16_t, nm::Complex64>, fun<nm::EW_ADD, int16_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, int32_t, uint8_t>, fun<nm::EW_ADD, int32_t, int8_t>, fun<nm::EW_ADD, int32_t, int16_t>, fun<nm::EW_ADD, int32_t, int32_t>, fun<nm::EW_ADD, int32_t, int64_t>,   
         \\\n        fun<nm::EW_ADD, int32_t, float32_t>, fun<nm::EW_ADD, int32_t, float64_t>, fun<nm::EW_ADD, int32_t, nm::Complex64>, fun<nm::EW_ADD, int32_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, int64_t, uint8_t>, fun<nm::EW_ADD, int64_t, int8_t>, fun<nm::EW_ADD, int64_t, int16_t>, fun<nm::EW_ADD, int64_t, int32_t>, fun<nm::EW_ADD, int64_t, int64_t>,            \\\n        fun<nm::EW_ADD, int64_t, float32_t>, fun<nm::EW_ADD, int64_t, float64_t>, fun<nm::EW_ADD, int64_t, nm::Complex64>, fun<nm::EW_ADD, int64_t, nm::Complex128>,                        \\\n         NULL},                                           \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, float32_t, uint8_t>, fun<nm::EW_ADD, float32_t, int8_t>, fun<nm::EW_ADD, float32_t, int16_t>, fun<nm::EW_ADD, float32_t, int32_t>, fun<nm::EW_ADD, float32_t, int64_t>,  \\\n        fun<nm::EW_ADD, float32_t, float32_t>, fun<nm::EW_ADD, float32_t, float64_t>, fun<nm::EW_ADD, float32_t, nm::Complex64>, fun<nm::EW_ADD, float32_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, float64_t, uint8_t>, fun<nm::EW_ADD, float64_t, int8_t>, fun<nm::EW_ADD, float64_t, int16_t>, fun<nm::EW_ADD, float64_t, int32_t>, fun<nm::EW_ADD, float64_t, int64_t>,  \\\n        fun<nm::EW_ADD, float64_t, float32_t>, fun<nm::EW_ADD, float64_t, float64_t>, fun<nm::EW_ADD, float64_t, 
nm::Complex64>, fun<nm::EW_ADD, float64_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, nm::Complex64, uint8_t>, fun<nm::EW_ADD, nm::Complex64, int8_t>, fun<nm::EW_ADD, nm::Complex64, int16_t>, fun<nm::EW_ADD, nm::Complex64, int32_t>,                    \\\n        fun<nm::EW_ADD, nm::Complex64, int64_t>, fun<nm::EW_ADD, nm::Complex64, float32_t>, fun<nm::EW_ADD, nm::Complex64, float64_t>, fun<nm::EW_ADD, nm::Complex64, nm::Complex64>,        \\\n        fun<nm::EW_ADD, nm::Complex64, nm::Complex128>,                                  \\\n         NULL},                                                                                                                  \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_ADD, nm::Complex128, uint8_t>, fun<nm::EW_ADD, nm::Complex128, int8_t>, fun<nm::EW_ADD, nm::Complex128, int16_t>, fun<nm::EW_ADD, nm::Complex128, int32_t>,                \\\n        fun<nm::EW_ADD, nm::Complex128, int64_t>, fun<nm::EW_ADD, nm::Complex128, float32_t>, fun<nm::EW_ADD, nm::Complex128, float64_t>, fun<nm::EW_ADD, nm::Complex128, nm::Complex64>,    \\\n        fun<nm::EW_ADD, nm::Complex128, nm::Complex128>,                                \\\n         NULL},                                                                                                                \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_ADD, nm::RubyObject, nm::RubyObject>}                                                  \\\n    },                                                                                                        
                                                              \\\n                                                                                                                                                                            \\\n    {                                                                                                                                                                        \\\n      {fun<nm::EW_SUB, uint8_t, uint8_t>, fun<nm::EW_SUB, uint8_t, int8_t>, fun<nm::EW_SUB, uint8_t, int16_t>, fun<nm::EW_SUB, uint8_t, int32_t>, fun<nm::EW_SUB, uint8_t, int64_t>,            \\\n        fun<nm::EW_SUB, uint8_t, float32_t>, fun<nm::EW_SUB, uint8_t, float64_t>, fun<nm::EW_SUB, uint8_t, nm::Complex64>, fun<nm::EW_SUB, uint8_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, int8_t, uint8_t>, fun<nm::EW_SUB, int8_t, int8_t>, fun<nm::EW_SUB, int8_t, int16_t>, fun<nm::EW_SUB, int8_t, int32_t>, fun<nm::EW_SUB, int8_t, int64_t>,                  \\\n        fun<nm::EW_SUB, int8_t, float32_t>, fun<nm::EW_SUB, int8_t, float64_t>, fun<nm::EW_SUB, int8_t, nm::Complex64>, fun<nm::EW_SUB, int8_t, nm::Complex128>,                            \\\n         NULL},                                              \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, int16_t, uint8_t>, fun<nm::EW_SUB, int16_t, int8_t>, fun<nm::EW_SUB, int16_t, int16_t>, fun<nm::EW_SUB, int16_t, int32_t>, fun<nm::EW_SUB, int16_t, int64_t>,            \\\n        fun<nm::EW_SUB, int16_t, float32_t>, fun<nm::EW_SUB, int16_t, float64_t>, fun<nm::EW_SUB, int16_t, nm::Complex64>, 
fun<nm::EW_SUB, int16_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, int32_t, uint8_t>, fun<nm::EW_SUB, int32_t, int8_t>, fun<nm::EW_SUB, int32_t, int16_t>, fun<nm::EW_SUB, int32_t, int32_t>, fun<nm::EW_SUB, int32_t, int64_t>,            \\\n        fun<nm::EW_SUB, int32_t, float32_t>, fun<nm::EW_SUB, int32_t, float64_t>, fun<nm::EW_SUB, int32_t, nm::Complex64>, fun<nm::EW_SUB, int32_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, int64_t, uint8_t>, fun<nm::EW_SUB, int64_t, int8_t>, fun<nm::EW_SUB, int64_t, int16_t>, fun<nm::EW_SUB, int64_t, int32_t>, fun<nm::EW_SUB, int64_t, int64_t>,            \\\n        fun<nm::EW_SUB, int64_t, float32_t>, fun<nm::EW_SUB, int64_t, float64_t>, fun<nm::EW_SUB, int64_t, nm::Complex64>, fun<nm::EW_SUB, int64_t, nm::Complex128>,                        \\\n         NULL},                                           \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, float32_t, uint8_t>, fun<nm::EW_SUB, float32_t, int8_t>, fun<nm::EW_SUB, float32_t, int16_t>, fun<nm::EW_SUB, float32_t, int32_t>, fun<nm::EW_SUB, float32_t, int64_t>,  \\\n        fun<nm::EW_SUB, float32_t, float32_t>, fun<nm::EW_SUB, float32_t, float64_t>, fun<nm::EW_SUB, float32_t, nm::Complex64>, fun<nm::EW_SUB, float32_t, nm::Complex128>,                \\\n         NULL},                                    
  \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, float64_t, uint8_t>, fun<nm::EW_SUB, float64_t, int8_t>, fun<nm::EW_SUB, float64_t, int16_t>, fun<nm::EW_SUB, float64_t, int32_t>, fun<nm::EW_SUB, float64_t, int64_t>,  \\\n        fun<nm::EW_SUB, float64_t, float32_t>, fun<nm::EW_SUB, float64_t, float64_t>, fun<nm::EW_SUB, float64_t, nm::Complex64>, fun<nm::EW_SUB, float64_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, nm::Complex64, uint8_t>, fun<nm::EW_SUB, nm::Complex64, int8_t>, fun<nm::EW_SUB, nm::Complex64, int16_t>, fun<nm::EW_SUB, nm::Complex64, int32_t>,                    \\\n        fun<nm::EW_SUB, nm::Complex64, int64_t>, fun<nm::EW_SUB, nm::Complex64, float32_t>, fun<nm::EW_SUB, nm::Complex64, float64_t>, fun<nm::EW_SUB, nm::Complex64, nm::Complex64>,        \\\n        fun<nm::EW_SUB, nm::Complex64, nm::Complex128>,                                  \\\n         NULL},                                                                                                                  \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_SUB, nm::Complex128, uint8_t>, fun<nm::EW_SUB, nm::Complex128, int8_t>, fun<nm::EW_SUB, nm::Complex128, int16_t>, fun<nm::EW_SUB, nm::Complex128, int32_t>,                \\\n        fun<nm::EW_SUB, nm::Complex128, int64_t>, fun<nm::EW_SUB, nm::Complex128, float32_t>, fun<nm::EW_SUB, nm::Complex128, float64_t>, fun<nm::EW_SUB, nm::Complex128, nm::Complex64>,    \\\n        
fun<nm::EW_SUB, nm::Complex128, nm::Complex128>,                                \\\n                                                                                                                                                                            \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_SUB, nm::RubyObject, nm::RubyObject>}                                                  \\\n    },                                                                                                                                                                      \\\n                                                                                                                                                                            \\\n    {                                                                                                                                                                        \\\n      {fun<nm::EW_MUL, uint8_t, uint8_t>, fun<nm::EW_MUL, uint8_t, int8_t>, fun<nm::EW_MUL, uint8_t, int16_t>, fun<nm::EW_MUL, uint8_t, int32_t>, fun<nm::EW_MUL, uint8_t, int64_t>,            \\\n        fun<nm::EW_MUL, uint8_t, float32_t>, fun<nm::EW_MUL, uint8_t, float64_t>, fun<nm::EW_MUL, uint8_t, nm::Complex64>, fun<nm::EW_MUL, uint8_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MUL, int8_t, uint8_t>, fun<nm::EW_MUL, int8_t, int8_t>, fun<nm::EW_MUL, int8_t, int16_t>, fun<nm::EW_MUL, int8_t, int32_t>, fun<nm::EW_MUL, int8_t, int64_t>,                  \\\n        fun<nm::EW_MUL, int8_t, float32_t>, fun<nm::EW_MUL, int8_t, float64_t>, fun<nm::EW_MUL, int8_t, nm::Complex64>, fun<nm::EW_MUL, int8_t, nm::Complex128>,                            \\\n         
NULL},                                              \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MUL, int16_t, uint8_t>, fun<nm::EW_MUL, int16_t, int8_t>, fun<nm::EW_MUL, int16_t, int16_t>, fun<nm::EW_MUL, int16_t, int32_t>, fun<nm::EW_MUL, int16_t, int64_t>,            \\\n        fun<nm::EW_MUL, int16_t, float32_t>, fun<nm::EW_MUL, int16_t, float64_t>, fun<nm::EW_MUL, int16_t, nm::Complex64>, fun<nm::EW_MUL, int16_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MUL, int32_t, uint8_t>, fun<nm::EW_MUL, int32_t, int8_t>, fun<nm::EW_MUL, int32_t, int16_t>, fun<nm::EW_MUL, int32_t, int32_t>, fun<nm::EW_MUL, int32_t, int64_t>,            \\\n        fun<nm::EW_MUL, int32_t, float32_t>, fun<nm::EW_MUL, int32_t, float64_t>, fun<nm::EW_MUL, int32_t, nm::Complex64>, fun<nm::EW_MUL, int32_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MUL, int64_t, uint8_t>, fun<nm::EW_MUL, int64_t, int8_t>, fun<nm::EW_MUL, int64_t, int16_t>, fun<nm::EW_MUL, int64_t, int32_t>, fun<nm::EW_MUL, int64_t, int64_t>,            \\\n        fun<nm::EW_MUL, int64_t, float32_t>, fun<nm::EW_MUL, int64_t, float64_t>, fun<nm::EW_MUL, int64_t, nm::Complex64>, fun<nm::EW_MUL, int64_t, nm::Complex128>,                        \\\n         NULL},                                           \\\n                                                                
                                                                                                            \\\n      {fun<nm::EW_MUL, float32_t, uint8_t>, fun<nm::EW_MUL, float32_t, int8_t>, fun<nm::EW_MUL, float32_t, int16_t>, fun<nm::EW_MUL, float32_t, int32_t>, fun<nm::EW_MUL, float32_t, int64_t>,  \\\n        fun<nm::EW_MUL, float32_t, float32_t>, fun<nm::EW_MUL, float32_t, float64_t>, fun<nm::EW_MUL, float32_t, nm::Complex64>, fun<nm::EW_MUL, float32_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MUL, float64_t, uint8_t>, fun<nm::EW_MUL, float64_t, int8_t>, fun<nm::EW_MUL, float64_t, int16_t>, fun<nm::EW_MUL, float64_t, int32_t>, fun<nm::EW_MUL, float64_t, int64_t>,  \\\n        fun<nm::EW_MUL, float64_t, float32_t>, fun<nm::EW_MUL, float64_t, float64_t>, fun<nm::EW_MUL, float64_t, nm::Complex64>, fun<nm::EW_MUL, float64_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MUL, nm::Complex64, uint8_t>, fun<nm::EW_MUL, nm::Complex64, int8_t>, fun<nm::EW_MUL, nm::Complex64, int16_t>, fun<nm::EW_MUL, nm::Complex64, int32_t>,                    \\\n        fun<nm::EW_MUL, nm::Complex64, int64_t>, fun<nm::EW_MUL, nm::Complex64, float32_t>, fun<nm::EW_MUL, nm::Complex64, float64_t>, fun<nm::EW_MUL, nm::Complex64, nm::Complex64>,        \\\n        fun<nm::EW_MUL, nm::Complex64, nm::Complex128>,                                  \\\n         NULL},                                                                                                                  \\\n                                  
                                                                                                                                          \\\n      {fun<nm::EW_MUL, nm::Complex128, uint8_t>, fun<nm::EW_MUL, nm::Complex128, int8_t>, fun<nm::EW_MUL, nm::Complex128, int16_t>, fun<nm::EW_MUL, nm::Complex128, int32_t>,                \\\n        fun<nm::EW_MUL, nm::Complex128, int64_t>, fun<nm::EW_MUL, nm::Complex128, float32_t>, fun<nm::EW_MUL, nm::Complex128, float64_t>, fun<nm::EW_MUL, nm::Complex128, nm::Complex64>,    \\\n        fun<nm::EW_MUL, nm::Complex128, nm::Complex128>,                                \\\n                                                                                                                                                                            \\\n    {                                                                                                                                                                        \\\n      {fun<nm::EW_DIV, uint8_t, uint8_t>, fun<nm::EW_DIV, uint8_t, int8_t>, fun<nm::EW_DIV, uint8_t, int16_t>, fun<nm::EW_DIV, uint8_t, int32_t>, fun<nm::EW_DIV, uint8_t, int64_t>,            \\\n        fun<nm::EW_DIV, uint8_t, float32_t>, fun<nm::EW_DIV, uint8_t, float64_t>, fun<nm::EW_DIV, uint8_t, nm::Complex64>, fun<nm::EW_DIV, uint8_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_DIV, int8_t, uint8_t>, fun<nm::EW_DIV, int8_t, int8_t>, fun<nm::EW_DIV, int8_t, int16_t>, fun<nm::EW_DIV, int8_t, int32_t>, fun<nm::EW_DIV, int8_t, int64_t>,                  \\\n        fun<nm::EW_DIV, int8_t, float32_t>, fun<nm::EW_DIV, int8_t, float64_t>, fun<nm::EW_DIV, int8_t, nm::Complex64>, fun<nm::EW_DIV, int8_t, nm::Complex128>,                            \\\n         
NULL},                                              \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_DIV, int16_t, uint8_t>, fun<nm::EW_DIV, int16_t, int8_t>, fun<nm::EW_DIV, int16_t, int16_t>, fun<nm::EW_DIV, int16_t, int32_t>, fun<nm::EW_DIV, int16_t, int64_t>,            \\\n        fun<nm::EW_DIV, int16_t, float32_t>, fun<nm::EW_DIV, int16_t, float64_t>, fun<nm::EW_DIV, int16_t, nm::Complex64>, fun<nm::EW_DIV, int16_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_DIV, int32_t, uint8_t>, fun<nm::EW_DIV, int32_t, int8_t>, fun<nm::EW_DIV, int32_t, int16_t>, fun<nm::EW_DIV, int32_t, int32_t>, fun<nm::EW_DIV, int32_t, int64_t>,            \\\n        fun<nm::EW_DIV, int32_t, float32_t>, fun<nm::EW_DIV, int32_t, float64_t>, fun<nm::EW_DIV, int32_t, nm::Complex64>, fun<nm::EW_DIV, int32_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_DIV, int64_t, uint8_t>, fun<nm::EW_DIV, int64_t, int8_t>, fun<nm::EW_DIV, int64_t, int16_t>, fun<nm::EW_DIV, int64_t, int32_t>, fun<nm::EW_DIV, int64_t, int64_t>,            \\\n        fun<nm::EW_DIV, int64_t, float32_t>, fun<nm::EW_DIV, int64_t, float64_t>, fun<nm::EW_DIV, int64_t, nm::Complex64>, fun<nm::EW_DIV, int64_t, nm::Complex128>,                        \\\n         NULL},                                           \\\n                                                                
                                                                                                            \\\n      {fun<nm::EW_DIV, float32_t, uint8_t>, fun<nm::EW_DIV, float32_t, int8_t>, fun<nm::EW_DIV, float32_t, int16_t>, fun<nm::EW_DIV, float32_t, int32_t>, fun<nm::EW_DIV, float32_t, int64_t>,  \\\n        fun<nm::EW_DIV, float32_t, float32_t>, fun<nm::EW_DIV, float32_t, float64_t>, fun<nm::EW_DIV, float32_t, nm::Complex64>, fun<nm::EW_DIV, float32_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_DIV, float64_t, uint8_t>, fun<nm::EW_DIV, float64_t, int8_t>, fun<nm::EW_DIV, float64_t, int16_t>, fun<nm::EW_DIV, float64_t, int32_t>, fun<nm::EW_DIV, float64_t, int64_t>,  \\\n        fun<nm::EW_DIV, float64_t, float32_t>, fun<nm::EW_DIV, float64_t, float64_t>, fun<nm::EW_DIV, float64_t, nm::Complex64>, fun<nm::EW_DIV, float64_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_DIV, nm::Complex64, uint8_t>, fun<nm::EW_DIV, nm::Complex64, int8_t>, fun<nm::EW_DIV, nm::Complex64, int16_t>, fun<nm::EW_DIV, nm::Complex64, int32_t>,                    \\\n        fun<nm::EW_DIV, nm::Complex64, int64_t>, fun<nm::EW_DIV, nm::Complex64, float32_t>, fun<nm::EW_DIV, nm::Complex64, float64_t>, fun<nm::EW_DIV, nm::Complex64, nm::Complex64>,        \\\n        fun<nm::EW_DIV, nm::Complex64, nm::Complex128>,                                  \\\n         NULL},                                                                                                                  \\\n                                  
                                                                                                                                          \\\n      {fun<nm::EW_DIV, nm::Complex128, uint8_t>, fun<nm::EW_DIV, nm::Complex128, int8_t>, fun<nm::EW_DIV, nm::Complex128, int16_t>, fun<nm::EW_DIV, nm::Complex128, int32_t>,                \\\n        fun<nm::EW_DIV, nm::Complex128, int64_t>, fun<nm::EW_DIV, nm::Complex128, float32_t>, fun<nm::EW_DIV, nm::Complex128, float64_t>, fun<nm::EW_DIV, nm::Complex128, nm::Complex64>,    \\\n        fun<nm::EW_DIV, nm::Complex128, nm::Complex128>,                                \\\n         NULL},                                                                                                                \\\n\\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_DIV, nm::RubyObject, nm::RubyObject>}                                                  \\\n    },                                                                                                                                                                      \\\n      \\\n    { \\\n      {fun<nm::EW_POW, uint8_t, uint8_t>, fun<nm::EW_POW, uint8_t, int8_t>, fun<nm::EW_POW, uint8_t, int16_t>, fun<nm::EW_POW, uint8_t, int32_t>, fun<nm::EW_POW, uint8_t, int64_t>,            \\\n        fun<nm::EW_POW, uint8_t, float32_t>, fun<nm::EW_POW, uint8_t, float64_t>, fun<nm::EW_POW, uint8_t, nm::Complex64>, fun<nm::EW_POW, uint8_t, nm::Complex128>,                        \\\n NULL},                                            \\\n\\\n      {fun<nm::EW_POW, int8_t, uint8_t>, fun<nm::EW_POW, int8_t, int8_t>, fun<nm::EW_POW, int8_t, int16_t>, fun<nm::EW_POW, int8_t, int32_t>, fun<nm::EW_POW, int8_t, int64_t>,                  \\\n        fun<nm::EW_POW, int8_t, float32_t>, fun<nm::EW_POW, int8_t, float64_t>, fun<nm::EW_POW, int8_t, nm::Complex64>, fun<nm::EW_POW, int8_t, nm::Complex128>,                            \\\n NULL},                         
                     \\\n\\\n      {fun<nm::EW_POW, int16_t, uint8_t>, fun<nm::EW_POW, int16_t, int8_t>, fun<nm::EW_POW, int16_t, int16_t>, fun<nm::EW_POW, int16_t, int32_t>, fun<nm::EW_POW, int16_t, int64_t>,            \\\n        fun<nm::EW_POW, int16_t, float32_t>, fun<nm::EW_POW, int16_t, float64_t>, fun<nm::EW_POW, int16_t, nm::Complex64>, fun<nm::EW_POW, int16_t, nm::Complex128>,                        \\\n NULL},                                            \\\n\\\n      {fun<nm::EW_POW, int32_t, uint8_t>, fun<nm::EW_POW, int32_t, int8_t>, fun<nm::EW_POW, int32_t, int16_t>, fun<nm::EW_POW, int32_t, int32_t>, fun<nm::EW_POW, int32_t, int64_t>,            \\\n        fun<nm::EW_POW, int32_t, float32_t>, fun<nm::EW_POW, int32_t, float64_t>, fun<nm::EW_POW, int32_t, nm::Complex64>, fun<nm::EW_POW, int32_t, nm::Complex128>,                        \\\n NULL},                                            \\\n\\\n      {fun<nm::EW_POW, int64_t, uint8_t>, fun<nm::EW_POW, int64_t, int8_t>, fun<nm::EW_POW, int64_t, int16_t>, fun<nm::EW_POW, int64_t, int32_t>, fun<nm::EW_POW, int64_t, int64_t>,            \\\n        fun<nm::EW_POW, int64_t, float32_t>, fun<nm::EW_POW, int64_t, float64_t>, fun<nm::EW_POW, int64_t, nm::Complex64>, fun<nm::EW_POW, int64_t, nm::Complex128>,                        \\\n NULL},                                           \\\n\\\n      {fun<nm::EW_POW, float32_t, uint8_t>, fun<nm::EW_POW, float32_t, int8_t>, fun<nm::EW_POW, float32_t, int16_t>, fun<nm::EW_POW, float32_t, int32_t>, fun<nm::EW_POW, float32_t, int64_t>,  \\\n        fun<nm::EW_POW, float32_t, float32_t>, fun<nm::EW_POW, float32_t, float64_t>, fun<nm::EW_POW, float32_t, nm::Complex64>, fun<nm::EW_POW, float32_t, nm::Complex128>,                \\\n NULL},                                      \\\n\\\n      {fun<nm::EW_POW, float64_t, uint8_t>, fun<nm::EW_POW, float64_t, int8_t>, fun<nm::EW_POW, float64_t, int16_t>, fun<nm::EW_POW, float64_t, int32_t>, fun<nm::EW_POW, float64_t, 
int64_t>,  \\\n        fun<nm::EW_POW, float64_t, float32_t>, fun<nm::EW_POW, float64_t, float64_t>, fun<nm::EW_POW, float64_t, nm::Complex64>, fun<nm::EW_POW, float64_t, nm::Complex128>,                \\\n NULL},                                      \\\n\\\n      {fun<nm::EW_POW, nm::Complex64, uint8_t>, fun<nm::EW_POW, nm::Complex64, int8_t>, fun<nm::EW_POW, nm::Complex64, int16_t>, fun<nm::EW_POW, nm::Complex64, int32_t>,                    \\\n        fun<nm::EW_POW, nm::Complex64, int64_t>, fun<nm::EW_POW, nm::Complex64, float32_t>, fun<nm::EW_POW, nm::Complex64, float64_t>, fun<nm::EW_POW, nm::Complex64, nm::Complex64>,        \\\n        fun<nm::EW_POW, nm::Complex64, nm::Complex128>,                                  \\\n NULL},                                                                                                                  \\\n\\\n      {fun<nm::EW_POW, nm::Complex128, uint8_t>, fun<nm::EW_POW, nm::Complex128, int8_t>, fun<nm::EW_POW, nm::Complex128, int16_t>, fun<nm::EW_POW, nm::Complex128, int32_t>,                \\\n        fun<nm::EW_POW, nm::Complex128, int64_t>, fun<nm::EW_POW, nm::Complex128, float32_t>, fun<nm::EW_POW, nm::Complex128, float64_t>, fun<nm::EW_POW, nm::Complex128, nm::Complex64>,    \\\n        fun<nm::EW_POW, nm::Complex128, nm::Complex128>,                                \\\n NULL},                                                                                                                \\\n\\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_POW, nm::RubyObject, nm::RubyObject>}                                                  \\\n    },  \\\n\\\n    {                                                                                                                                                                        \\\n      {fun<nm::EW_MOD, uint8_t, uint8_t>, fun<nm::EW_MOD, uint8_t, int8_t>, fun<nm::EW_MOD, uint8_t, int16_t>, fun<nm::EW_MOD, uint8_t, int32_t>, 
fun<nm::EW_MOD, uint8_t, int64_t>,            \\\n        fun<nm::EW_MOD, uint8_t, float32_t>, fun<nm::EW_MOD, uint8_t, float64_t>, fun<nm::EW_MOD, uint8_t, nm::Complex64>, fun<nm::EW_MOD, uint8_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, int8_t, uint8_t>, fun<nm::EW_MOD, int8_t, int8_t>, fun<nm::EW_MOD, int8_t, int16_t>, fun<nm::EW_MOD, int8_t, int32_t>, fun<nm::EW_MOD, int8_t, int64_t>,                  \\\n        fun<nm::EW_MOD, int8_t, float32_t>, fun<nm::EW_MOD, int8_t, float64_t>, fun<nm::EW_MOD, int8_t, nm::Complex64>, fun<nm::EW_MOD, int8_t, nm::Complex128>,                            \\\n         NULL},                                              \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, int16_t, uint8_t>, fun<nm::EW_MOD, int16_t, int8_t>, fun<nm::EW_MOD, int16_t, int16_t>, fun<nm::EW_MOD, int16_t, int32_t>, fun<nm::EW_MOD, int16_t, int64_t>,            \\\n        fun<nm::EW_MOD, int16_t, float32_t>, fun<nm::EW_MOD, int16_t, float64_t>, fun<nm::EW_MOD, int16_t, nm::Complex64>, fun<nm::EW_MOD, int16_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, int32_t, uint8_t>, fun<nm::EW_MOD, int32_t, int8_t>, fun<nm::EW_MOD, int32_t, int16_t>, fun<nm::EW_MOD, int32_t, int32_t>, fun<nm::EW_MOD, int32_t, int64_t>,            \\\n        fun<nm::EW_MOD, int32_t, float32_t>, fun<nm::EW_MOD, 
int32_t, float64_t>, fun<nm::EW_MOD, int32_t, nm::Complex64>, fun<nm::EW_MOD, int32_t, nm::Complex128>,                        \\\n         NULL},                                            \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, int64_t, uint8_t>, fun<nm::EW_MOD, int64_t, int8_t>, fun<nm::EW_MOD, int64_t, int16_t>, fun<nm::EW_MOD, int64_t, int32_t>, fun<nm::EW_MOD, int64_t, int64_t>,            \\\n        fun<nm::EW_MOD, int64_t, float32_t>, fun<nm::EW_MOD, int64_t, float64_t>, fun<nm::EW_MOD, int64_t, nm::Complex64>, fun<nm::EW_MOD, int64_t, nm::Complex128>,                        \\\n         NULL},                                           \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, float32_t, uint8_t>, fun<nm::EW_MOD, float32_t, int8_t>, fun<nm::EW_MOD, float32_t, int16_t>, fun<nm::EW_MOD, float32_t, int32_t>, fun<nm::EW_MOD, float32_t, int64_t>,  \\\n        fun<nm::EW_MOD, float32_t, float32_t>, fun<nm::EW_MOD, float32_t, float64_t>, fun<nm::EW_MOD, float32_t, nm::Complex64>, fun<nm::EW_MOD, float32_t, nm::Complex128>,                \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, float64_t, uint8_t>, fun<nm::EW_MOD, float64_t, int8_t>, fun<nm::EW_MOD, float64_t, int16_t>, fun<nm::EW_MOD, float64_t, int32_t>, fun<nm::EW_MOD, float64_t, int64_t>,  \\\n        fun<nm::EW_MOD, float64_t, float32_t>, fun<nm::EW_MOD, float64_t, float64_t>, fun<nm::EW_MOD, float64_t, nm::Complex64>, fun<nm::EW_MOD, float64_t, nm::Complex128>,               
 \\\n         NULL},                                      \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, nm::Complex64, uint8_t>, fun<nm::EW_MOD, nm::Complex64, int8_t>, fun<nm::EW_MOD, nm::Complex64, int16_t>, fun<nm::EW_MOD, nm::Complex64, int32_t>,                    \\\n        fun<nm::EW_MOD, nm::Complex64, int64_t>, fun<nm::EW_MOD, nm::Complex64, float32_t>, fun<nm::EW_MOD, nm::Complex64, float64_t>, fun<nm::EW_MOD, nm::Complex64, nm::Complex64>,        \\\n        fun<nm::EW_MOD, nm::Complex64, nm::Complex128>,                                  \\\n         NULL},                                                                                                                  \\\n                                                                                                                                                                            \\\n      {fun<nm::EW_MOD, nm::Complex128, uint8_t>, fun<nm::EW_MOD, nm::Complex128, int8_t>, fun<nm::EW_MOD, nm::Complex128, int16_t>, fun<nm::EW_MOD, nm::Complex128, int32_t>,                \\\n        fun<nm::EW_MOD, nm::Complex128, int64_t>, fun<nm::EW_MOD, nm::Complex128, float32_t>, fun<nm::EW_MOD, nm::Complex128, float64_t>, fun<nm::EW_MOD, nm::Complex128, nm::Complex64>,    \\\n        fun<nm::EW_MOD, nm::Complex128, nm::Complex128>,                                \\\n         NULL},                                                                                                                \\\n\\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_MOD, nm::RubyObject, nm::RubyObject>}                                                  \\\n    },                                                                                                                                                                      \\\n    
                                                                                                                                                                        \\\n    {                                                                                                                                                                       \\\n      {fun<nm::EW_EQEQ, uint8_t, uint8_t>, fun<nm::EW_EQEQ, uint8_t, int8_t>, fun<nm::EW_EQEQ, uint8_t, int16_t>, fun<nm::EW_EQEQ, uint8_t, int32_t>, \\\n        fun<nm::EW_EQEQ, uint8_t, int64_t>, fun<nm::EW_EQEQ, uint8_t, float32_t>, fun<nm::EW_EQEQ, uint8_t, float64_t>, fun<nm::EW_EQEQ, uint8_t, nm::Complex64>, \\\n        fun<nm::EW_EQEQ, uint8_t, nm::Complex128>, \\\n NULL}, \\\n      {fun<nm::EW_EQEQ, int8_t, uint8_t>, fun<nm::EW_EQEQ, int8_t, int8_t>, fun<nm::EW_EQEQ, int8_t, int16_t>, fun<nm::EW_EQEQ, int8_t, int32_t>, fun<nm::EW_EQEQ, int8_t, int64_t>, fun<nm::EW_EQEQ, int8_t, float32_t>, fun<nm::EW_EQEQ, int8_t, float64_t>, fun<nm::EW_EQEQ, int8_t, nm::Complex64>, fun<nm::EW_EQEQ, int8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, int16_t, uint8_t>, fun<nm::EW_EQEQ, int16_t, int8_t>, fun<nm::EW_EQEQ, int16_t, int16_t>, fun<nm::EW_EQEQ, int16_t, int32_t>, fun<nm::EW_EQEQ, int16_t, int64_t>, fun<nm::EW_EQEQ, int16_t, float32_t>, fun<nm::EW_EQEQ, int16_t, float64_t>, fun<nm::EW_EQEQ, int16_t, nm::Complex64>, fun<nm::EW_EQEQ, int16_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, int32_t, uint8_t>, fun<nm::EW_EQEQ, int32_t, int8_t>, fun<nm::EW_EQEQ, int32_t, int16_t>, fun<nm::EW_EQEQ, int32_t, int32_t>, fun<nm::EW_EQEQ, int32_t, int64_t>, fun<nm::EW_EQEQ, int32_t, float32_t>, fun<nm::EW_EQEQ, int32_t, float64_t>, fun<nm::EW_EQEQ, int32_t, nm::Complex64>, fun<nm::EW_EQEQ, int32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, int64_t, uint8_t>, fun<nm::EW_EQEQ, int64_t, int8_t>, fun<nm::EW_EQEQ, int64_t, int16_t>, fun<nm::EW_EQEQ, int64_t, int32_t>, fun<nm::EW_EQEQ, int64_t, int64_t>, 
fun<nm::EW_EQEQ, int64_t, float32_t>, fun<nm::EW_EQEQ, int64_t, float64_t>, fun<nm::EW_EQEQ, int64_t, nm::Complex64>, fun<nm::EW_EQEQ, int64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, float32_t, uint8_t>, fun<nm::EW_EQEQ, float32_t, int8_t>, fun<nm::EW_EQEQ, float32_t, int16_t>, fun<nm::EW_EQEQ, float32_t, int32_t>, fun<nm::EW_EQEQ, float32_t, int64_t>, fun<nm::EW_EQEQ, float32_t, float32_t>, fun<nm::EW_EQEQ, float32_t, float64_t>, fun<nm::EW_EQEQ, float32_t, nm::Complex64>, fun<nm::EW_EQEQ, float32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, float64_t, uint8_t>, fun<nm::EW_EQEQ, float64_t, int8_t>, fun<nm::EW_EQEQ, float64_t, int16_t>, fun<nm::EW_EQEQ, float64_t, int32_t>, fun<nm::EW_EQEQ, float64_t, int64_t>, fun<nm::EW_EQEQ, float64_t, float32_t>, fun<nm::EW_EQEQ, float64_t, float64_t>, fun<nm::EW_EQEQ, float64_t, nm::Complex64>, fun<nm::EW_EQEQ, float64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, nm::Complex64, uint8_t>, fun<nm::EW_EQEQ, nm::Complex64, int8_t>, fun<nm::EW_EQEQ, nm::Complex64, int16_t>, fun<nm::EW_EQEQ, nm::Complex64, int32_t>, fun<nm::EW_EQEQ, nm::Complex64, int64_t>, fun<nm::EW_EQEQ, nm::Complex64, float32_t>, fun<nm::EW_EQEQ, nm::Complex64, float64_t>, fun<nm::EW_EQEQ, nm::Complex64, nm::Complex64>, fun<nm::EW_EQEQ, nm::Complex64, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_EQEQ, nm::Complex128, uint8_t>, fun<nm::EW_EQEQ, nm::Complex128, int8_t>, fun<nm::EW_EQEQ, nm::Complex128, int16_t>, fun<nm::EW_EQEQ, nm::Complex128, int32_t>, fun<nm::EW_EQEQ, nm::Complex128, int64_t>, fun<nm::EW_EQEQ, nm::Complex128, float32_t>, fun<nm::EW_EQEQ, nm::Complex128, float64_t>, fun<nm::EW_EQEQ, nm::Complex128, nm::Complex64>, fun<nm::EW_EQEQ, nm::Complex128, nm::Complex128>, NULL}, \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_EQEQ, nm::RubyObject, nm::RubyObject>}  \\\n    }, \\\n    {{fun<nm::EW_NEQ, uint8_t, uint8_t>, fun<nm::EW_NEQ, uint8_t, int8_t>, fun<nm::EW_NEQ, 
uint8_t, int16_t>, fun<nm::EW_NEQ, uint8_t, int32_t>, fun<nm::EW_NEQ, uint8_t, int64_t>, fun<nm::EW_NEQ, uint8_t, float32_t>, fun<nm::EW_NEQ, uint8_t, float64_t>, fun<nm::EW_NEQ, uint8_t, nm::Complex64>, fun<nm::EW_NEQ, uint8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, int8_t, uint8_t>, fun<nm::EW_NEQ, int8_t, int8_t>, fun<nm::EW_NEQ, int8_t, int16_t>, fun<nm::EW_NEQ, int8_t, int32_t>, fun<nm::EW_NEQ, int8_t, int64_t>, fun<nm::EW_NEQ, int8_t, float32_t>, fun<nm::EW_NEQ, int8_t, float64_t>, fun<nm::EW_NEQ, int8_t, nm::Complex64>, fun<nm::EW_NEQ, int8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, int16_t, uint8_t>, fun<nm::EW_NEQ, int16_t, int8_t>, fun<nm::EW_NEQ, int16_t, int16_t>, fun<nm::EW_NEQ, int16_t, int32_t>, fun<nm::EW_NEQ, int16_t, int64_t>, fun<nm::EW_NEQ, int16_t, float32_t>, fun<nm::EW_NEQ, int16_t, float64_t>, fun<nm::EW_NEQ, int16_t, nm::Complex64>, fun<nm::EW_NEQ, int16_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, int32_t, uint8_t>, fun<nm::EW_NEQ, int32_t, int8_t>, fun<nm::EW_NEQ, int32_t, int16_t>, fun<nm::EW_NEQ, int32_t, int32_t>, fun<nm::EW_NEQ, int32_t, int64_t>, fun<nm::EW_NEQ, int32_t, float32_t>, fun<nm::EW_NEQ, int32_t, float64_t>, fun<nm::EW_NEQ, int32_t, nm::Complex64>, fun<nm::EW_NEQ, int32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, int64_t, uint8_t>, fun<nm::EW_NEQ, int64_t, int8_t>, fun<nm::EW_NEQ, int64_t, int16_t>, fun<nm::EW_NEQ, int64_t, int32_t>, fun<nm::EW_NEQ, int64_t, int64_t>, fun<nm::EW_NEQ, int64_t, float32_t>, fun<nm::EW_NEQ, int64_t, float64_t>, fun<nm::EW_NEQ, int64_t, nm::Complex64>, fun<nm::EW_NEQ, int64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, float32_t, uint8_t>, fun<nm::EW_NEQ, float32_t, int8_t>, fun<nm::EW_NEQ, float32_t, int16_t>, fun<nm::EW_NEQ, float32_t, int32_t>, fun<nm::EW_NEQ, float32_t, int64_t>, fun<nm::EW_NEQ, float32_t, float32_t>, fun<nm::EW_NEQ, float32_t, float64_t>, fun<nm::EW_NEQ, float32_t, nm::Complex64>, fun<nm::EW_NEQ, float32_t, nm::Complex128>, 
NULL}, \\\n      {fun<nm::EW_NEQ, float64_t, uint8_t>, fun<nm::EW_NEQ, float64_t, int8_t>, fun<nm::EW_NEQ, float64_t, int16_t>, fun<nm::EW_NEQ, float64_t, int32_t>, fun<nm::EW_NEQ, float64_t, int64_t>, fun<nm::EW_NEQ, float64_t, float32_t>, fun<nm::EW_NEQ, float64_t, float64_t>, fun<nm::EW_NEQ, float64_t, nm::Complex64>, fun<nm::EW_NEQ, float64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, nm::Complex64, uint8_t>, fun<nm::EW_NEQ, nm::Complex64, int8_t>, fun<nm::EW_NEQ, nm::Complex64, int16_t>, fun<nm::EW_NEQ, nm::Complex64, int32_t>, fun<nm::EW_NEQ, nm::Complex64, int64_t>, fun<nm::EW_NEQ, nm::Complex64, float32_t>, fun<nm::EW_NEQ, nm::Complex64, float64_t>, fun<nm::EW_NEQ, nm::Complex64, nm::Complex64>, fun<nm::EW_NEQ, nm::Complex64, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_NEQ, nm::Complex128, uint8_t>, fun<nm::EW_NEQ, nm::Complex128, int8_t>, fun<nm::EW_NEQ, nm::Complex128, int16_t>, fun<nm::EW_NEQ, nm::Complex128, int32_t>, fun<nm::EW_NEQ, nm::Complex128, int64_t>, fun<nm::EW_NEQ, nm::Complex128, float32_t>, fun<nm::EW_NEQ, nm::Complex128, float64_t>, fun<nm::EW_NEQ, nm::Complex128, nm::Complex64>, fun<nm::EW_NEQ, nm::Complex128, nm::Complex128>, NULL}, \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_NEQ, nm::RubyObject, nm::RubyObject>}}, \\\n    {{fun<nm::EW_LT, uint8_t, uint8_t>, fun<nm::EW_LT, uint8_t, int8_t>, fun<nm::EW_LT, uint8_t, int16_t>, fun<nm::EW_LT, uint8_t, int32_t>, fun<nm::EW_LT, uint8_t, int64_t>, fun<nm::EW_LT, uint8_t, float32_t>, fun<nm::EW_LT, uint8_t, float64_t>, fun<nm::EW_LT, uint8_t, nm::Complex64>, fun<nm::EW_LT, uint8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, int8_t, uint8_t>, fun<nm::EW_LT, int8_t, int8_t>, fun<nm::EW_LT, int8_t, int16_t>, fun<nm::EW_LT, int8_t, int32_t>, fun<nm::EW_LT, int8_t, int64_t>, fun<nm::EW_LT, int8_t, float32_t>, fun<nm::EW_LT, int8_t, float64_t>, fun<nm::EW_LT, int8_t, nm::Complex64>, fun<nm::EW_LT, int8_t, nm::Complex128>, NULL}, \\\n   
   {fun<nm::EW_LT, int16_t, uint8_t>, fun<nm::EW_LT, int16_t, int8_t>, fun<nm::EW_LT, int16_t, int16_t>, fun<nm::EW_LT, int16_t, int32_t>, fun<nm::EW_LT, int16_t, int64_t>, fun<nm::EW_LT, int16_t, float32_t>, fun<nm::EW_LT, int16_t, float64_t>, fun<nm::EW_LT, int16_t, nm::Complex64>, fun<nm::EW_LT, int16_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, int32_t, uint8_t>, fun<nm::EW_LT, int32_t, int8_t>, fun<nm::EW_LT, int32_t, int16_t>, fun<nm::EW_LT, int32_t, int32_t>, fun<nm::EW_LT, int32_t, int64_t>, fun<nm::EW_LT, int32_t, float32_t>, fun<nm::EW_LT, int32_t, float64_t>, fun<nm::EW_LT, int32_t, nm::Complex64>, fun<nm::EW_LT, int32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, int64_t, uint8_t>, fun<nm::EW_LT, int64_t, int8_t>, fun<nm::EW_LT, int64_t, int16_t>, fun<nm::EW_LT, int64_t, int32_t>, fun<nm::EW_LT, int64_t, int64_t>, fun<nm::EW_LT, int64_t, float32_t>, fun<nm::EW_LT, int64_t, float64_t>, fun<nm::EW_LT, int64_t, nm::Complex64>, fun<nm::EW_LT, int64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, float32_t, uint8_t>, fun<nm::EW_LT, float32_t, int8_t>, fun<nm::EW_LT, float32_t, int16_t>, fun<nm::EW_LT, float32_t, int32_t>, fun<nm::EW_LT, float32_t, int64_t>, fun<nm::EW_LT, float32_t, float32_t>, fun<nm::EW_LT, float32_t, float64_t>, fun<nm::EW_LT, float32_t, nm::Complex64>, fun<nm::EW_LT, float32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, float64_t, uint8_t>, fun<nm::EW_LT, float64_t, int8_t>, fun<nm::EW_LT, float64_t, int16_t>, fun<nm::EW_LT, float64_t, int32_t>, fun<nm::EW_LT, float64_t, int64_t>, fun<nm::EW_LT, float64_t, float32_t>, fun<nm::EW_LT, float64_t, float64_t>, fun<nm::EW_LT, float64_t, nm::Complex64>, fun<nm::EW_LT, float64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, nm::Complex64, uint8_t>, fun<nm::EW_LT, nm::Complex64, int8_t>, fun<nm::EW_LT, nm::Complex64, int16_t>, fun<nm::EW_LT, nm::Complex64, int32_t>, fun<nm::EW_LT, nm::Complex64, int64_t>, fun<nm::EW_LT, nm::Complex64, float32_t>, fun<nm::EW_LT, 
nm::Complex64, float64_t>, fun<nm::EW_LT, nm::Complex64, nm::Complex64>, fun<nm::EW_LT, nm::Complex64, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LT, nm::Complex128, uint8_t>, fun<nm::EW_LT, nm::Complex128, int8_t>, fun<nm::EW_LT, nm::Complex128, int16_t>, fun<nm::EW_LT, nm::Complex128, int32_t>, fun<nm::EW_LT, nm::Complex128, int64_t>, fun<nm::EW_LT, nm::Complex128, float32_t>, fun<nm::EW_LT, nm::Complex128, float64_t>, fun<nm::EW_LT, nm::Complex128, nm::Complex64>, fun<nm::EW_LT, nm::Complex128, nm::Complex128>, NULL}, \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_LT, nm::RubyObject, nm::RubyObject>}}, \\\n    {{fun<nm::EW_GT, uint8_t, uint8_t>, fun<nm::EW_GT, uint8_t, int8_t>, fun<nm::EW_GT, uint8_t, int16_t>, fun<nm::EW_GT, uint8_t, int32_t>, fun<nm::EW_GT, uint8_t, int64_t>, fun<nm::EW_GT, uint8_t, float32_t>, fun<nm::EW_GT, uint8_t, float64_t>, fun<nm::EW_GT, uint8_t, nm::Complex64>, fun<nm::EW_GT, uint8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, int8_t, uint8_t>, fun<nm::EW_GT, int8_t, int8_t>, fun<nm::EW_GT, int8_t, int16_t>, fun<nm::EW_GT, int8_t, int32_t>, fun<nm::EW_GT, int8_t, int64_t>, fun<nm::EW_GT, int8_t, float32_t>, fun<nm::EW_GT, int8_t, float64_t>, fun<nm::EW_GT, int8_t, nm::Complex64>, fun<nm::EW_GT, int8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, int16_t, uint8_t>, fun<nm::EW_GT, int16_t, int8_t>, fun<nm::EW_GT, int16_t, int16_t>, fun<nm::EW_GT, int16_t, int32_t>, fun<nm::EW_GT, int16_t, int64_t>, fun<nm::EW_GT, int16_t, float32_t>, fun<nm::EW_GT, int16_t, float64_t>, fun<nm::EW_GT, int16_t, nm::Complex64>, fun<nm::EW_GT, int16_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, int32_t, uint8_t>, fun<nm::EW_GT, int32_t, int8_t>, fun<nm::EW_GT, int32_t, int16_t>, fun<nm::EW_GT, int32_t, int32_t>, fun<nm::EW_GT, int32_t, int64_t>, fun<nm::EW_GT, int32_t, float32_t>, fun<nm::EW_GT, int32_t, float64_t>, fun<nm::EW_GT, int32_t, nm::Complex64>, fun<nm::EW_GT, int32_t, 
nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, int64_t, uint8_t>, fun<nm::EW_GT, int64_t, int8_t>, fun<nm::EW_GT, int64_t, int16_t>, fun<nm::EW_GT, int64_t, int32_t>, fun<nm::EW_GT, int64_t, int64_t>, fun<nm::EW_GT, int64_t, float32_t>, fun<nm::EW_GT, int64_t, float64_t>, fun<nm::EW_GT, int64_t, nm::Complex64>, fun<nm::EW_GT, int64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, float32_t, uint8_t>, fun<nm::EW_GT, float32_t, int8_t>, fun<nm::EW_GT, float32_t, int16_t>, fun<nm::EW_GT, float32_t, int32_t>, fun<nm::EW_GT, float32_t, int64_t>, fun<nm::EW_GT, float32_t, float32_t>, fun<nm::EW_GT, float32_t, float64_t>, fun<nm::EW_GT, float32_t, nm::Complex64>, fun<nm::EW_GT, float32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, float64_t, uint8_t>, fun<nm::EW_GT, float64_t, int8_t>, fun<nm::EW_GT, float64_t, int16_t>, fun<nm::EW_GT, float64_t, int32_t>, fun<nm::EW_GT, float64_t, int64_t>, fun<nm::EW_GT, float64_t, float32_t>, fun<nm::EW_GT, float64_t, float64_t>, fun<nm::EW_GT, float64_t, nm::Complex64>, fun<nm::EW_GT, float64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, nm::Complex64, uint8_t>, fun<nm::EW_GT, nm::Complex64, int8_t>, fun<nm::EW_GT, nm::Complex64, int16_t>, fun<nm::EW_GT, nm::Complex64, int32_t>, fun<nm::EW_GT, nm::Complex64, int64_t>, fun<nm::EW_GT, nm::Complex64, float32_t>, fun<nm::EW_GT, nm::Complex64, float64_t>, fun<nm::EW_GT, nm::Complex64, nm::Complex64>, fun<nm::EW_GT, nm::Complex64, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GT, nm::Complex128, uint8_t>, fun<nm::EW_GT, nm::Complex128, int8_t>, fun<nm::EW_GT, nm::Complex128, int16_t>, fun<nm::EW_GT, nm::Complex128, int32_t>, fun<nm::EW_GT, nm::Complex128, int64_t>, fun<nm::EW_GT, nm::Complex128, float32_t>, fun<nm::EW_GT, nm::Complex128, float64_t>, fun<nm::EW_GT, nm::Complex128, nm::Complex64>, fun<nm::EW_GT, nm::Complex128, nm::Complex128>, NULL}, \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_GT, nm::RubyObject, 
nm::RubyObject>}}, \\\n    {{fun<nm::EW_LEQ, uint8_t, uint8_t>, fun<nm::EW_LEQ, uint8_t, int8_t>, fun<nm::EW_LEQ, uint8_t, int16_t>, fun<nm::EW_LEQ, uint8_t, int32_t>, fun<nm::EW_LEQ, uint8_t, int64_t>, fun<nm::EW_LEQ, uint8_t, float32_t>, fun<nm::EW_LEQ, uint8_t, float64_t>, fun<nm::EW_LEQ, uint8_t, nm::Complex64>, fun<nm::EW_LEQ, uint8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, int8_t, uint8_t>, fun<nm::EW_LEQ, int8_t, int8_t>, fun<nm::EW_LEQ, int8_t, int16_t>, fun<nm::EW_LEQ, int8_t, int32_t>, fun<nm::EW_LEQ, int8_t, int64_t>, fun<nm::EW_LEQ, int8_t, float32_t>, fun<nm::EW_LEQ, int8_t, float64_t>, fun<nm::EW_LEQ, int8_t, nm::Complex64>, fun<nm::EW_LEQ, int8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, int16_t, uint8_t>, fun<nm::EW_LEQ, int16_t, int8_t>, fun<nm::EW_LEQ, int16_t, int16_t>, fun<nm::EW_LEQ, int16_t, int32_t>, fun<nm::EW_LEQ, int16_t, int64_t>, fun<nm::EW_LEQ, int16_t, float32_t>, fun<nm::EW_LEQ, int16_t, float64_t>, fun<nm::EW_LEQ, int16_t, nm::Complex64>, fun<nm::EW_LEQ, int16_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, int32_t, uint8_t>, fun<nm::EW_LEQ, int32_t, int8_t>, fun<nm::EW_LEQ, int32_t, int16_t>, fun<nm::EW_LEQ, int32_t, int32_t>, fun<nm::EW_LEQ, int32_t, int64_t>, fun<nm::EW_LEQ, int32_t, float32_t>, fun<nm::EW_LEQ, int32_t, float64_t>, fun<nm::EW_LEQ, int32_t, nm::Complex64>, fun<nm::EW_LEQ, int32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, int64_t, uint8_t>, fun<nm::EW_LEQ, int64_t, int8_t>, fun<nm::EW_LEQ, int64_t, int16_t>, fun<nm::EW_LEQ, int64_t, int32_t>, fun<nm::EW_LEQ, int64_t, int64_t>, fun<nm::EW_LEQ, int64_t, float32_t>, fun<nm::EW_LEQ, int64_t, float64_t>, fun<nm::EW_LEQ, int64_t, nm::Complex64>, fun<nm::EW_LEQ, int64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, float32_t, uint8_t>, fun<nm::EW_LEQ, float32_t, int8_t>, fun<nm::EW_LEQ, float32_t, int16_t>, fun<nm::EW_LEQ, float32_t, int32_t>, fun<nm::EW_LEQ, float32_t, int64_t>, fun<nm::EW_LEQ, float32_t, float32_t>, 
fun<nm::EW_LEQ, float32_t, float64_t>, fun<nm::EW_LEQ, float32_t, nm::Complex64>, fun<nm::EW_LEQ, float32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, float64_t, uint8_t>, fun<nm::EW_LEQ, float64_t, int8_t>, fun<nm::EW_LEQ, float64_t, int16_t>, fun<nm::EW_LEQ, float64_t, int32_t>, fun<nm::EW_LEQ, float64_t, int64_t>, fun<nm::EW_LEQ, float64_t, float32_t>, fun<nm::EW_LEQ, float64_t, float64_t>, fun<nm::EW_LEQ, float64_t, nm::Complex64>, fun<nm::EW_LEQ, float64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, nm::Complex64, uint8_t>, fun<nm::EW_LEQ, nm::Complex64, int8_t>, fun<nm::EW_LEQ, nm::Complex64, int16_t>, fun<nm::EW_LEQ, nm::Complex64, int32_t>, fun<nm::EW_LEQ, nm::Complex64, int64_t>, fun<nm::EW_LEQ, nm::Complex64, float32_t>, fun<nm::EW_LEQ, nm::Complex64, float64_t>, fun<nm::EW_LEQ, nm::Complex64, nm::Complex64>, fun<nm::EW_LEQ, nm::Complex64, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_LEQ, nm::Complex128, uint8_t>, fun<nm::EW_LEQ, nm::Complex128, int8_t>, fun<nm::EW_LEQ, nm::Complex128, int16_t>, fun<nm::EW_LEQ, nm::Complex128, int32_t>, fun<nm::EW_LEQ, nm::Complex128, int64_t>, fun<nm::EW_LEQ, nm::Complex128, float32_t>, fun<nm::EW_LEQ, nm::Complex128, float64_t>, fun<nm::EW_LEQ, nm::Complex128, nm::Complex64>, fun<nm::EW_LEQ, nm::Complex128, nm::Complex128>, NULL}, \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_LEQ, nm::RubyObject, nm::RubyObject>}}, \\\n    {{fun<nm::EW_GEQ, uint8_t, uint8_t>, fun<nm::EW_GEQ, uint8_t, int8_t>, fun<nm::EW_GEQ, uint8_t, int16_t>, fun<nm::EW_GEQ, uint8_t, int32_t>, fun<nm::EW_GEQ, uint8_t, int64_t>, fun<nm::EW_GEQ, uint8_t, float32_t>, fun<nm::EW_GEQ, uint8_t, float64_t>, fun<nm::EW_GEQ, uint8_t, nm::Complex64>, fun<nm::EW_GEQ, uint8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, int8_t, uint8_t>, fun<nm::EW_GEQ, int8_t, int8_t>, fun<nm::EW_GEQ, int8_t, int16_t>, fun<nm::EW_GEQ, int8_t, int32_t>, fun<nm::EW_GEQ, int8_t, int64_t>, fun<nm::EW_GEQ, 
int8_t, float32_t>, fun<nm::EW_GEQ, int8_t, float64_t>, fun<nm::EW_GEQ, int8_t, nm::Complex64>, fun<nm::EW_GEQ, int8_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, int16_t, uint8_t>, fun<nm::EW_GEQ, int16_t, int8_t>, fun<nm::EW_GEQ, int16_t, int16_t>, fun<nm::EW_GEQ, int16_t, int32_t>, fun<nm::EW_GEQ, int16_t, int64_t>, fun<nm::EW_GEQ, int16_t, float32_t>, fun<nm::EW_GEQ, int16_t, float64_t>, fun<nm::EW_GEQ, int16_t, nm::Complex64>, fun<nm::EW_GEQ, int16_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, int32_t, uint8_t>, fun<nm::EW_GEQ, int32_t, int8_t>, fun<nm::EW_GEQ, int32_t, int16_t>, fun<nm::EW_GEQ, int32_t, int32_t>, fun<nm::EW_GEQ, int32_t, int64_t>, fun<nm::EW_GEQ, int32_t, float32_t>, fun<nm::EW_GEQ, int32_t, float64_t>, fun<nm::EW_GEQ, int32_t, nm::Complex64>, fun<nm::EW_GEQ, int32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, int64_t, uint8_t>, fun<nm::EW_GEQ, int64_t, int8_t>, fun<nm::EW_GEQ, int64_t, int16_t>, fun<nm::EW_GEQ, int64_t, int32_t>, fun<nm::EW_GEQ, int64_t, int64_t>, fun<nm::EW_GEQ, int64_t, float32_t>, fun<nm::EW_GEQ, int64_t, float64_t>, fun<nm::EW_GEQ, int64_t, nm::Complex64>, fun<nm::EW_GEQ, int64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, float32_t, uint8_t>, fun<nm::EW_GEQ, float32_t, int8_t>, fun<nm::EW_GEQ, float32_t, int16_t>, fun<nm::EW_GEQ, float32_t, int32_t>, fun<nm::EW_GEQ, float32_t, int64_t>, fun<nm::EW_GEQ, float32_t, float32_t>, fun<nm::EW_GEQ, float32_t, float64_t>, fun<nm::EW_GEQ, float32_t, nm::Complex64>, fun<nm::EW_GEQ, float32_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, float64_t, uint8_t>, fun<nm::EW_GEQ, float64_t, int8_t>, fun<nm::EW_GEQ, float64_t, int16_t>, fun<nm::EW_GEQ, float64_t, int32_t>, fun<nm::EW_GEQ, float64_t, int64_t>, fun<nm::EW_GEQ, float64_t, float32_t>, fun<nm::EW_GEQ, float64_t, float64_t>, fun<nm::EW_GEQ, float64_t, nm::Complex64>, fun<nm::EW_GEQ, float64_t, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, nm::Complex64, uint8_t>, fun<nm::EW_GEQ, 
nm::Complex64, int8_t>, fun<nm::EW_GEQ, nm::Complex64, int16_t>, fun<nm::EW_GEQ, nm::Complex64, int32_t>, fun<nm::EW_GEQ, nm::Complex64, int64_t>, fun<nm::EW_GEQ, nm::Complex64, float32_t>, fun<nm::EW_GEQ, nm::Complex64, float64_t>, fun<nm::EW_GEQ, nm::Complex64, nm::Complex64>, fun<nm::EW_GEQ, nm::Complex64, nm::Complex128>, NULL}, \\\n      {fun<nm::EW_GEQ, nm::Complex128, uint8_t>, fun<nm::EW_GEQ, nm::Complex128, int8_t>, fun<nm::EW_GEQ, nm::Complex128, int16_t>, fun<nm::EW_GEQ, nm::Complex128, int32_t>, fun<nm::EW_GEQ, nm::Complex128, int64_t>, fun<nm::EW_GEQ, nm::Complex128, float32_t>, fun<nm::EW_GEQ, nm::Complex128, float64_t>, fun<nm::EW_GEQ, nm::Complex128, nm::Complex64>, fun<nm::EW_GEQ, nm::Complex128, nm::Complex128>, NULL}, \\\n      {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, fun<nm::EW_GEQ, nm::RubyObject, nm::RubyObject>} \\\n    } \\\n  };\n\n/*\n * Defines a static array that holds function pointers to an elementwise op,\n * itype, dtype templated versions of the specified function.\n */\n#define OP_ITYPE_DTYPE_TEMPLATE_TABLE(fun, ret, ...) NAMED_OP_ITYPE_DTYPE_TEMPLATE_TABLE(ttable, fun, ret, __VA_ARGS__)\n\n#define NAMED_OP_ITYPE_DTYPE_TEMPLATE_TABLE(name,  fun,  ret,  ...) 
\\\n  static ret (*(name)[nm::NUM_EWOPS][nm::NUM_ITYPES][nm::NUM_DTYPES])(__VA_ARGS__) = \\\n    {{{fun<nm::EW_ADD, uint8_t, uint8_t>,fun<nm::EW_ADD, uint8_t, int8_t>,fun<nm::EW_ADD, uint8_t, int16_t>,fun<nm::EW_ADD, uint8_t, int32_t>,fun<nm::EW_ADD, uint8_t, int64_t>,fun<nm::EW_ADD, uint8_t, float32_t>,fun<nm::EW_ADD, uint8_t, float64_t>,fun<nm::EW_ADD, uint8_t, nm::Complex64>,fun<nm::EW_ADD, uint8_t, nm::Complex128>,fun<nm::EW_ADD, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_ADD, uint16_t, uint8_t>,fun<nm::EW_ADD, uint16_t, int8_t>,fun<nm::EW_ADD, uint16_t, int16_t>,fun<nm::EW_ADD, uint16_t, int32_t>,fun<nm::EW_ADD, uint16_t, int64_t>,fun<nm::EW_ADD, uint16_t, float32_t>,fun<nm::EW_ADD, uint16_t, float64_t>,fun<nm::EW_ADD, uint16_t, nm::Complex64>,fun<nm::EW_ADD, uint16_t, nm::Complex128>,fun<nm::EW_ADD, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_ADD, uint32_t, uint8_t>,fun<nm::EW_ADD, uint32_t, int8_t>,fun<nm::EW_ADD, uint32_t, int16_t>,fun<nm::EW_ADD, uint32_t, int32_t>,fun<nm::EW_ADD, uint32_t, int64_t>,fun<nm::EW_ADD, uint32_t, float32_t>,fun<nm::EW_ADD, uint32_t, float64_t>,fun<nm::EW_ADD, uint32_t, nm::Complex64>,fun<nm::EW_ADD, uint32_t, nm::Complex128>,fun<nm::EW_ADD, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_ADD, uint64_t, uint8_t>,fun<nm::EW_ADD, uint64_t, int8_t>,fun<nm::EW_ADD, uint64_t, int16_t>,fun<nm::EW_ADD, uint64_t, int32_t>,fun<nm::EW_ADD, uint64_t, int64_t>,fun<nm::EW_ADD, uint64_t, float32_t>,fun<nm::EW_ADD, uint64_t, float64_t>,fun<nm::EW_ADD, uint64_t, nm::Complex64>,fun<nm::EW_ADD, uint64_t, nm::Complex128>,fun<nm::EW_ADD, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_SUB, uint8_t, uint8_t>,fun<nm::EW_SUB, uint8_t, int8_t>,fun<nm::EW_SUB, uint8_t, int16_t>,fun<nm::EW_SUB, uint8_t, int32_t>,fun<nm::EW_SUB, uint8_t, int64_t>,fun<nm::EW_SUB, uint8_t, float32_t>,fun<nm::EW_SUB, uint8_t, float64_t>,fun<nm::EW_SUB, uint8_t, nm::Complex64>,fun<nm::EW_SUB, uint8_t, nm::Complex128>,fun<nm::EW_SUB, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_SUB, 
uint16_t, uint8_t>,fun<nm::EW_SUB, uint16_t, int8_t>,fun<nm::EW_SUB, uint16_t, int16_t>,fun<nm::EW_SUB, uint16_t, int32_t>,fun<nm::EW_SUB, uint16_t, int64_t>,fun<nm::EW_SUB, uint16_t, float32_t>,fun<nm::EW_SUB, uint16_t, float64_t>,fun<nm::EW_SUB, uint16_t, nm::Complex64>,fun<nm::EW_SUB, uint16_t, nm::Complex128>,fun<nm::EW_SUB, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_SUB, uint32_t, uint8_t>,fun<nm::EW_SUB, uint32_t, int8_t>,fun<nm::EW_SUB, uint32_t, int16_t>,fun<nm::EW_SUB, uint32_t, int32_t>,fun<nm::EW_SUB, uint32_t, int64_t>,fun<nm::EW_SUB, uint32_t, float32_t>,fun<nm::EW_SUB, uint32_t, float64_t>,fun<nm::EW_SUB, uint32_t, nm::Complex64>,fun<nm::EW_SUB, uint32_t, nm::Complex128>,fun<nm::EW_SUB, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_SUB, uint64_t, uint8_t>,fun<nm::EW_SUB, uint64_t, int8_t>,fun<nm::EW_SUB, uint64_t, int16_t>,fun<nm::EW_SUB, uint64_t, int32_t>,fun<nm::EW_SUB, uint64_t, int64_t>,fun<nm::EW_SUB, uint64_t, float32_t>,fun<nm::EW_SUB, uint64_t, float64_t>,fun<nm::EW_SUB, uint64_t, nm::Complex64>,fun<nm::EW_SUB, uint64_t, nm::Complex128>,fun<nm::EW_SUB, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_MUL, uint8_t, uint8_t>,fun<nm::EW_MUL, uint8_t, int8_t>,fun<nm::EW_MUL, uint8_t, int16_t>,fun<nm::EW_MUL, uint8_t, int32_t>,fun<nm::EW_MUL, uint8_t, int64_t>,fun<nm::EW_MUL, uint8_t, float32_t>,fun<nm::EW_MUL, uint8_t, float64_t>,fun<nm::EW_MUL, uint8_t, nm::Complex64>,fun<nm::EW_MUL, uint8_t, nm::Complex128>,fun<nm::EW_MUL, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_MUL, uint16_t, uint8_t>,fun<nm::EW_MUL, uint16_t, int8_t>,fun<nm::EW_MUL, uint16_t, int16_t>,fun<nm::EW_MUL, uint16_t, int32_t>,fun<nm::EW_MUL, uint16_t, int64_t>,fun<nm::EW_MUL, uint16_t, float32_t>,fun<nm::EW_MUL, uint16_t, float64_t>,fun<nm::EW_MUL, uint16_t, nm::Complex64>,fun<nm::EW_MUL, uint16_t, nm::Complex128>,fun<nm::EW_MUL, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_MUL, uint32_t, uint8_t>,fun<nm::EW_MUL, uint32_t, int8_t>,fun<nm::EW_MUL, uint32_t, int16_t>,fun<nm::EW_MUL, 
uint32_t, int32_t>,fun<nm::EW_MUL, uint32_t, int64_t>,fun<nm::EW_MUL, uint32_t, float32_t>,fun<nm::EW_MUL, uint32_t, float64_t>,fun<nm::EW_MUL, uint32_t, nm::Complex64>,fun<nm::EW_MUL, uint32_t, nm::Complex128>,fun<nm::EW_MUL, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_MUL, uint64_t, uint8_t>,fun<nm::EW_MUL, uint64_t, int8_t>,fun<nm::EW_MUL, uint64_t, int16_t>,fun<nm::EW_MUL, uint64_t, int32_t>,fun<nm::EW_MUL, uint64_t, int64_t>,fun<nm::EW_MUL, uint64_t, float32_t>,fun<nm::EW_MUL, uint64_t, float64_t>,fun<nm::EW_MUL, uint64_t, nm::Complex64>,fun<nm::EW_MUL, uint64_t, nm::Complex128>,fun<nm::EW_MUL, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_DIV, uint8_t, uint8_t>,fun<nm::EW_DIV, uint8_t, int8_t>,fun<nm::EW_DIV, uint8_t, int16_t>,fun<nm::EW_DIV, uint8_t, int32_t>,fun<nm::EW_DIV, uint8_t, int64_t>,fun<nm::EW_DIV, uint8_t, float32_t>,fun<nm::EW_DIV, uint8_t, float64_t>,fun<nm::EW_DIV, uint8_t, nm::Complex64>,fun<nm::EW_DIV, uint8_t, nm::Complex128>,fun<nm::EW_DIV, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_DIV, uint16_t, uint8_t>,fun<nm::EW_DIV, uint16_t, int8_t>,fun<nm::EW_DIV, uint16_t, int16_t>,fun<nm::EW_DIV, uint16_t, int32_t>,fun<nm::EW_DIV, uint16_t, int64_t>,fun<nm::EW_DIV, uint16_t, float32_t>,fun<nm::EW_DIV, uint16_t, float64_t>,fun<nm::EW_DIV, uint16_t, nm::Complex64>,fun<nm::EW_DIV, uint16_t, nm::Complex128>,fun<nm::EW_DIV, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_DIV, uint32_t, uint8_t>,fun<nm::EW_DIV, uint32_t, int8_t>,fun<nm::EW_DIV, uint32_t, int16_t>,fun<nm::EW_DIV, uint32_t, int32_t>,fun<nm::EW_DIV, uint32_t, int64_t>,fun<nm::EW_DIV, uint32_t, float32_t>,fun<nm::EW_DIV, uint32_t, float64_t>,fun<nm::EW_DIV, uint32_t, nm::Complex64>,fun<nm::EW_DIV, uint32_t, nm::Complex128>,fun<nm::EW_DIV, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_DIV, uint64_t, uint8_t>,fun<nm::EW_DIV, uint64_t, int8_t>,fun<nm::EW_DIV, uint64_t, int16_t>,fun<nm::EW_DIV, uint64_t, int32_t>,fun<nm::EW_DIV, uint64_t, int64_t>,fun<nm::EW_DIV, uint64_t, float32_t>,fun<nm::EW_DIV, 
uint64_t, float64_t>,fun<nm::EW_DIV, uint64_t, nm::Complex64>,fun<nm::EW_DIV, uint64_t, nm::Complex128>,fun<nm::EW_DIV, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_MOD, uint8_t, uint8_t>,fun<nm::EW_MOD, uint8_t, int8_t>,fun<nm::EW_MOD, uint8_t, int16_t>,fun<nm::EW_MOD, uint8_t, int32_t>,fun<nm::EW_MOD, uint8_t, int64_t>,fun<nm::EW_MOD, uint8_t, float32_t>,fun<nm::EW_MOD, uint8_t, float64_t>,fun<nm::EW_MOD, uint8_t, nm::Complex64>,fun<nm::EW_MOD, uint8_t, nm::Complex128>,fun<nm::EW_MOD, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_MOD, uint16_t, uint8_t>,fun<nm::EW_MOD, uint16_t, int8_t>,fun<nm::EW_MOD, uint16_t, int16_t>,fun<nm::EW_MOD, uint16_t, int32_t>,fun<nm::EW_MOD, uint16_t, int64_t>,fun<nm::EW_MOD, uint16_t, float32_t>,fun<nm::EW_MOD, uint16_t, float64_t>,fun<nm::EW_MOD, uint16_t, nm::Complex64>,fun<nm::EW_MOD, uint16_t, nm::Complex128>,fun<nm::EW_MOD, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_MOD, uint32_t, uint8_t>,fun<nm::EW_MOD, uint32_t, int8_t>,fun<nm::EW_MOD, uint32_t, int16_t>,fun<nm::EW_MOD, uint32_t, int32_t>,fun<nm::EW_MOD, uint32_t, int64_t>,fun<nm::EW_MOD, uint32_t, float32_t>,fun<nm::EW_MOD, uint32_t, float64_t>,fun<nm::EW_MOD, uint32_t, nm::Complex64>,fun<nm::EW_MOD, uint32_t, nm::Complex128>,fun<nm::EW_MOD, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_MOD, uint64_t, uint8_t>,fun<nm::EW_MOD, uint64_t, int8_t>,fun<nm::EW_MOD, uint64_t, int16_t>,fun<nm::EW_MOD, uint64_t, int32_t>,fun<nm::EW_MOD, uint64_t, int64_t>,fun<nm::EW_MOD, uint64_t, float32_t>,fun<nm::EW_MOD, uint64_t, float64_t>,fun<nm::EW_MOD, uint64_t, nm::Complex64>,fun<nm::EW_MOD, uint64_t, nm::Complex128>,fun<nm::EW_MOD, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_EQEQ, uint8_t, uint8_t>,fun<nm::EW_EQEQ, uint8_t, int8_t>,fun<nm::EW_EQEQ, uint8_t, int16_t>,fun<nm::EW_EQEQ, uint8_t, int32_t>,fun<nm::EW_EQEQ, uint8_t, int64_t>,fun<nm::EW_EQEQ, uint8_t, float32_t>,fun<nm::EW_EQEQ, uint8_t, float64_t>,fun<nm::EW_EQEQ, uint8_t, nm::Complex64>,fun<nm::EW_EQEQ, uint8_t, 
nm::Complex128>,fun<nm::EW_EQEQ, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_EQEQ, uint16_t, uint8_t>,fun<nm::EW_EQEQ, uint16_t, int8_t>,fun<nm::EW_EQEQ, uint16_t, int16_t>,fun<nm::EW_EQEQ, uint16_t, int32_t>,fun<nm::EW_EQEQ, uint16_t, int64_t>,fun<nm::EW_EQEQ, uint16_t, float32_t>,fun<nm::EW_EQEQ, uint16_t, float64_t>,fun<nm::EW_EQEQ, uint16_t, nm::Complex64>,fun<nm::EW_EQEQ, uint16_t, nm::Complex128>,fun<nm::EW_EQEQ, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_EQEQ, uint32_t, uint8_t>,fun<nm::EW_EQEQ, uint32_t, int8_t>,fun<nm::EW_EQEQ, uint32_t, int16_t>,fun<nm::EW_EQEQ, uint32_t, int32_t>,fun<nm::EW_EQEQ, uint32_t, int64_t>,fun<nm::EW_EQEQ, uint32_t, float32_t>,fun<nm::EW_EQEQ, uint32_t, float64_t>,fun<nm::EW_EQEQ, uint32_t, nm::Complex64>,fun<nm::EW_EQEQ, uint32_t, nm::Complex128>,fun<nm::EW_EQEQ, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_EQEQ, uint64_t, uint8_t>,fun<nm::EW_EQEQ, uint64_t, int8_t>,fun<nm::EW_EQEQ, uint64_t, int16_t>,fun<nm::EW_EQEQ, uint64_t, int32_t>,fun<nm::EW_EQEQ, uint64_t, int64_t>,fun<nm::EW_EQEQ, uint64_t, float32_t>,fun<nm::EW_EQEQ, uint64_t, float64_t>,fun<nm::EW_EQEQ, uint64_t, nm::Complex64>,fun<nm::EW_EQEQ, uint64_t, nm::Complex128>,fun<nm::EW_EQEQ, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_NEQ, uint8_t, uint8_t>,fun<nm::EW_NEQ, uint8_t, int8_t>,fun<nm::EW_NEQ, uint8_t, int16_t>,fun<nm::EW_NEQ, uint8_t, int32_t>,fun<nm::EW_NEQ, uint8_t, int64_t>,fun<nm::EW_NEQ, uint8_t, float32_t>,fun<nm::EW_NEQ, uint8_t, float64_t>,fun<nm::EW_NEQ, uint8_t, nm::Complex64>,fun<nm::EW_NEQ, uint8_t, nm::Complex128>,fun<nm::EW_NEQ, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_NEQ, uint16_t, uint8_t>,fun<nm::EW_NEQ, uint16_t, int8_t>,fun<nm::EW_NEQ, uint16_t, int16_t>,fun<nm::EW_NEQ, uint16_t, int32_t>,fun<nm::EW_NEQ, uint16_t, int64_t>,fun<nm::EW_NEQ, uint16_t, float32_t>,fun<nm::EW_NEQ, uint16_t, float64_t>,fun<nm::EW_NEQ, uint16_t, nm::Complex64>,fun<nm::EW_NEQ, uint16_t, nm::Complex128>,fun<nm::EW_NEQ, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_NEQ, 
uint32_t, uint8_t>,fun<nm::EW_NEQ, uint32_t, int8_t>,fun<nm::EW_NEQ, uint32_t, int16_t>,fun<nm::EW_NEQ, uint32_t, int32_t>,fun<nm::EW_NEQ, uint32_t, int64_t>,fun<nm::EW_NEQ, uint32_t, float32_t>,fun<nm::EW_NEQ, uint32_t, float64_t>,fun<nm::EW_NEQ, uint32_t, nm::Complex64>,fun<nm::EW_NEQ, uint32_t, nm::Complex128>,fun<nm::EW_NEQ, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_NEQ, uint64_t, uint8_t>,fun<nm::EW_NEQ, uint64_t, int8_t>,fun<nm::EW_NEQ, uint64_t, int16_t>,fun<nm::EW_NEQ, uint64_t, int32_t>,fun<nm::EW_NEQ, uint64_t, int64_t>,fun<nm::EW_NEQ, uint64_t, float32_t>,fun<nm::EW_NEQ, uint64_t, float64_t>,fun<nm::EW_NEQ, uint64_t, nm::Complex64>,fun<nm::EW_NEQ, uint64_t, nm::Complex128>,fun<nm::EW_NEQ, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_LT, uint8_t, uint8_t>,fun<nm::EW_LT, uint8_t, int8_t>,fun<nm::EW_LT, uint8_t, int16_t>,fun<nm::EW_LT, uint8_t, int32_t>,fun<nm::EW_LT, uint8_t, int64_t>,fun<nm::EW_LT, uint8_t, float32_t>,fun<nm::EW_LT, uint8_t, float64_t>,fun<nm::EW_LT, uint8_t, nm::Complex64>,fun<nm::EW_LT, uint8_t, nm::Complex128>,fun<nm::EW_LT, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_LT, uint16_t, uint8_t>,fun<nm::EW_LT, uint16_t, int8_t>,fun<nm::EW_LT, uint16_t, int16_t>,fun<nm::EW_LT, uint16_t, int32_t>,fun<nm::EW_LT, uint16_t, int64_t>,fun<nm::EW_LT, uint16_t, float32_t>,fun<nm::EW_LT, uint16_t, float64_t>,fun<nm::EW_LT, uint16_t, nm::Complex64>,fun<nm::EW_LT, uint16_t, nm::Complex128>,fun<nm::EW_LT, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_LT, uint32_t, uint8_t>,fun<nm::EW_LT, uint32_t, int8_t>,fun<nm::EW_LT, uint32_t, int16_t>,fun<nm::EW_LT, uint32_t, int32_t>,fun<nm::EW_LT, uint32_t, int64_t>,fun<nm::EW_LT, uint32_t, float32_t>,fun<nm::EW_LT, uint32_t, float64_t>,fun<nm::EW_LT, uint32_t, nm::Complex64>,fun<nm::EW_LT, uint32_t, nm::Complex128>,fun<nm::EW_LT, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_LT, uint64_t, uint8_t>,fun<nm::EW_LT, uint64_t, int8_t>,fun<nm::EW_LT, uint64_t, int16_t>,fun<nm::EW_LT, uint64_t, int32_t>,fun<nm::EW_LT, 
uint64_t, int64_t>,fun<nm::EW_LT, uint64_t, float32_t>,fun<nm::EW_LT, uint64_t, float64_t>,fun<nm::EW_LT, uint64_t, nm::Complex64>,fun<nm::EW_LT, uint64_t, nm::Complex128>,fun<nm::EW_LT, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_GT, uint8_t, uint8_t>,fun<nm::EW_GT, uint8_t, int8_t>,fun<nm::EW_GT, uint8_t, int16_t>,fun<nm::EW_GT, uint8_t, int32_t>,fun<nm::EW_GT, uint8_t, int64_t>,fun<nm::EW_GT, uint8_t, float32_t>,fun<nm::EW_GT, uint8_t, float64_t>,fun<nm::EW_GT, uint8_t, nm::Complex64>,fun<nm::EW_GT, uint8_t, nm::Complex128>,fun<nm::EW_GT, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_GT, uint16_t, uint8_t>,fun<nm::EW_GT, uint16_t, int8_t>,fun<nm::EW_GT, uint16_t, int16_t>,fun<nm::EW_GT, uint16_t, int32_t>,fun<nm::EW_GT, uint16_t, int64_t>,fun<nm::EW_GT, uint16_t, float32_t>,fun<nm::EW_GT, uint16_t, float64_t>,fun<nm::EW_GT, uint16_t, nm::Complex64>,fun<nm::EW_GT, uint16_t, nm::Complex128>,fun<nm::EW_GT, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_GT, uint32_t, uint8_t>,fun<nm::EW_GT, uint32_t, int8_t>,fun<nm::EW_GT, uint32_t, int16_t>,fun<nm::EW_GT, uint32_t, int32_t>,fun<nm::EW_GT, uint32_t, int64_t>,fun<nm::EW_GT, uint32_t, float32_t>,fun<nm::EW_GT, uint32_t, float64_t>,fun<nm::EW_GT, uint32_t, nm::Complex64>,fun<nm::EW_GT, uint32_t, nm::Complex128>,fun<nm::EW_GT, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_GT, uint64_t, uint8_t>,fun<nm::EW_GT, uint64_t, int8_t>,fun<nm::EW_GT, uint64_t, int16_t>,fun<nm::EW_GT, uint64_t, int32_t>,fun<nm::EW_GT, uint64_t, int64_t>,fun<nm::EW_GT, uint64_t, float32_t>,fun<nm::EW_GT, uint64_t, float64_t>,fun<nm::EW_GT, uint64_t, nm::Complex64>,fun<nm::EW_GT, uint64_t, nm::Complex128>,fun<nm::EW_GT, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_LEQ, uint8_t, uint8_t>,fun<nm::EW_LEQ, uint8_t, int8_t>,fun<nm::EW_LEQ, uint8_t, int16_t>,fun<nm::EW_LEQ, uint8_t, int32_t>,fun<nm::EW_LEQ, uint8_t, int64_t>,fun<nm::EW_LEQ, uint8_t, float32_t>,fun<nm::EW_LEQ, uint8_t, float64_t>,fun<nm::EW_LEQ, uint8_t, nm::Complex64>,fun<nm::EW_LEQ, uint8_t, 
nm::Complex128>,fun<nm::EW_LEQ, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_LEQ, uint16_t, uint8_t>,fun<nm::EW_LEQ, uint16_t, int8_t>,fun<nm::EW_LEQ, uint16_t, int16_t>,fun<nm::EW_LEQ, uint16_t, int32_t>,fun<nm::EW_LEQ, uint16_t, int64_t>,fun<nm::EW_LEQ, uint16_t, float32_t>,fun<nm::EW_LEQ, uint16_t, float64_t>,fun<nm::EW_LEQ, uint16_t, nm::Complex64>,fun<nm::EW_LEQ, uint16_t, nm::Complex128>,fun<nm::EW_LEQ, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_LEQ, uint32_t, uint8_t>,fun<nm::EW_LEQ, uint32_t, int8_t>,fun<nm::EW_LEQ, uint32_t, int16_t>,fun<nm::EW_LEQ, uint32_t, int32_t>,fun<nm::EW_LEQ, uint32_t, int64_t>,fun<nm::EW_LEQ, uint32_t, float32_t>,fun<nm::EW_LEQ, uint32_t, float64_t>,fun<nm::EW_LEQ, uint32_t, nm::Complex64>,fun<nm::EW_LEQ, uint32_t, nm::Complex128>,fun<nm::EW_LEQ, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_LEQ, uint64_t, uint8_t>,fun<nm::EW_LEQ, uint64_t, int8_t>,fun<nm::EW_LEQ, uint64_t, int16_t>,fun<nm::EW_LEQ, uint64_t, int32_t>,fun<nm::EW_LEQ, uint64_t, int64_t>,fun<nm::EW_LEQ, uint64_t, float32_t>,fun<nm::EW_LEQ, uint64_t, float64_t>,fun<nm::EW_LEQ, uint64_t, nm::Complex64>,fun<nm::EW_LEQ, uint64_t, nm::Complex128>,fun<nm::EW_LEQ, uint64_t, nm::RubyObject>}},\\\n{{fun<nm::EW_GEQ, uint8_t, uint8_t>,fun<nm::EW_GEQ, uint8_t, int8_t>,fun<nm::EW_GEQ, uint8_t, int16_t>,fun<nm::EW_GEQ, uint8_t, int32_t>,fun<nm::EW_GEQ, uint8_t, int64_t>,fun<nm::EW_GEQ, uint8_t, float32_t>,fun<nm::EW_GEQ, uint8_t, float64_t>,fun<nm::EW_GEQ, uint8_t, nm::Complex64>,fun<nm::EW_GEQ, uint8_t, nm::Complex128>,fun<nm::EW_GEQ, uint8_t, nm::RubyObject>},\\\n{fun<nm::EW_GEQ, uint16_t, uint8_t>,fun<nm::EW_GEQ, uint16_t, int8_t>,fun<nm::EW_GEQ, uint16_t, int16_t>,fun<nm::EW_GEQ, uint16_t, int32_t>,fun<nm::EW_GEQ, uint16_t, int64_t>,fun<nm::EW_GEQ, uint16_t, float32_t>,fun<nm::EW_GEQ, uint16_t, float64_t>,fun<nm::EW_GEQ, uint16_t, nm::Complex64>,fun<nm::EW_GEQ, uint16_t, nm::Complex128>,fun<nm::EW_GEQ, uint16_t, nm::RubyObject>},\\\n{fun<nm::EW_GEQ, uint32_t, 
uint8_t>,fun<nm::EW_GEQ, uint32_t, int8_t>,fun<nm::EW_GEQ, uint32_t, int16_t>,fun<nm::EW_GEQ, uint32_t, int32_t>,fun<nm::EW_GEQ, uint32_t, int64_t>,fun<nm::EW_GEQ, uint32_t, float32_t>,fun<nm::EW_GEQ, uint32_t, float64_t>,fun<nm::EW_GEQ, uint32_t, nm::Complex64>,fun<nm::EW_GEQ, uint32_t, nm::Complex128>,fun<nm::EW_GEQ, uint32_t, nm::RubyObject>},\\\n{fun<nm::EW_GEQ, uint64_t, uint8_t>,fun<nm::EW_GEQ, uint64_t, int8_t>,fun<nm::EW_GEQ, uint64_t, int16_t>,fun<nm::EW_GEQ, uint64_t, int32_t>,fun<nm::EW_GEQ, uint64_t, int64_t>,fun<nm::EW_GEQ, uint64_t, float32_t>,fun<nm::EW_GEQ, uint64_t, float64_t>,fun<nm::EW_GEQ, uint64_t, nm::Complex64>,fun<nm::EW_GEQ, uint64_t, nm::Complex128>,fun<nm::EW_GEQ, uint64_t, nm::RubyObject>}}};\n\n\nextern \"C\" {\n\n\n/*\n * Data\n */\n\n// regular data types\nextern const char* const  DTYPE_NAMES[nm::NUM_DTYPES];\nextern const size_t       DTYPE_SIZES[nm::NUM_DTYPES];\n\nextern const nm::dtype_t Upcast[nm::NUM_DTYPES][nm::NUM_DTYPES];\n\n\n/*\n * Functions\n */\n\n\nvoid*            rubyobj_to_cval(VALUE val, nm::dtype_t dtype);\nvoid            rubyval_to_cval(VALUE val, nm::dtype_t dtype, void* loc);\n\nvoid nm_init_data();\n\n} // end of extern \"C\" block\n\n#endif // DATA_H\n"
  },
  {
    "path": "ext/nmatrix/data/meta.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == meta.h\n//\n// Header file for dealing with template metaprogramming.\n\n#ifndef META_H\n# define META_H\n\nnamespace nm {\n  /*\n   * Template Metaprogramming\n   */\n  template <typename T> struct ctype_to_dtype_enum {\n    static const nm::dtype_t value_type = nm::BYTE;\n  };\n  template <> struct ctype_to_dtype_enum<uint8_t> { static const nm::dtype_t value_type = nm::BYTE; };\n  template <> struct ctype_to_dtype_enum<int8_t>  { static const nm::dtype_t value_type = nm::INT8; };\n  template <> struct ctype_to_dtype_enum<int16_t> { static const nm::dtype_t value_type = nm::INT16; };\n  template <> struct ctype_to_dtype_enum<int32_t> { static const nm::dtype_t value_type = nm::INT32; };\n  template <> struct ctype_to_dtype_enum<int64_t> { static const nm::dtype_t value_type = nm::INT64; };\n  template <> struct ctype_to_dtype_enum<float>   { static const nm::dtype_t value_type = nm::FLOAT32; };\n  template <> struct ctype_to_dtype_enum<double>  { static const nm::dtype_t value_type = nm::FLOAT64; };\n  template <> struct ctype_to_dtype_enum<Complex64>   { static const nm::dtype_t value_type = nm::COMPLEX64; };\n  template <> struct ctype_to_dtype_enum<Complex128>  { static const nm::dtype_t 
value_type = nm::COMPLEX128; };\n  template <> struct ctype_to_dtype_enum<RubyObject>  { static const nm::dtype_t value_type = nm::RUBYOBJ; };\n\n\n  template <nm::dtype_t Enum> struct dtype_enum_T;\n  template <> struct dtype_enum_T<nm::BYTE> { typedef uint8_t type; };\n  template <> struct dtype_enum_T<nm::INT8>  { typedef int8_t type;  };\n  template <> struct dtype_enum_T<nm::INT16> { typedef int16_t type; };\n  template <> struct dtype_enum_T<nm::INT32> { typedef int32_t type; };\n  template <> struct dtype_enum_T<nm::INT64> { typedef int64_t type; };\n  template <> struct dtype_enum_T<nm::FLOAT32> { typedef float type; };\n  template <> struct dtype_enum_T<nm::FLOAT64> { typedef double type; };\n  template <> struct dtype_enum_T<nm::COMPLEX64> { typedef nm::Complex64 type; };\n  template <> struct dtype_enum_T<nm::COMPLEX128> { typedef nm::Complex128 type; };\n  template <> struct dtype_enum_T<nm::RUBYOBJ> { typedef nm::RubyObject type; };\n\n} // end namespace nm\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/data/ruby_object.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == ruby_object.h\n//\n// Functions and classes for dealing with Ruby objects.\n\n#ifndef RUBY_OBJECT_H\n#define RUBY_OBJECT_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <iostream>\n#include <type_traits>\n\n/*\n * Project Includes\n */\n\n#include \"ruby_constants.h\"\n\n/*\n * Macros\n */\n#define NM_RUBYVAL_IS_NUMERIC(val)                (FIXNUM_P(val) or RB_FLOAT_TYPE_P(val) or RB_TYPE_P(val, T_COMPLEX))\n\n/*\n * Classes and Functions\n */\n\nnamespace nm {\ntemplate<typename T, typename U>\nstruct made_from_same_template : std::false_type {}; \n \ntemplate<template<typename> class Templ, typename Arg1, typename Arg2>\nstruct made_from_same_template<Templ<Arg1>, Templ<Arg2> > : std::true_type {};\n\nclass RubyObject {\n  public:\n  VALUE rval;\n  \n  /*\n   * Value constructor.\n   */\n  inline RubyObject(VALUE ref = Qnil) : rval(ref) {}\n  \n  /*\n   * Complex number constructor.\n   */\n  template <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  inline RubyObject(const Complex<FloatType>& other) : rval(rb_complex_new(rb_float_new(other.r), rb_float_new(other.i))) {}\n  \n  /*\n   * Integer constructor.\n   *\n   * 
Does not work as a template.\n   */\n  inline RubyObject(uint8_t other)  : rval(INT2FIX(other)) {}\n  inline RubyObject(int8_t other)   : rval(INT2FIX(other)) {}\n  inline RubyObject(int16_t other)  : rval(INT2FIX(other)) {}\n  inline RubyObject(uint16_t other) : rval(INT2FIX(other)) {}\n  inline RubyObject(int32_t other)  : rval(INT2FIX(other)) {}\n  // there is no uint32_t here because that's a Ruby VALUE type, and we need the compiler to treat that as a VALUE.\n  inline RubyObject(int64_t other)  : rval(INT2FIX(other)) {}\n//  inline RubyObject(uint64_t other) : rval(INT2FIX(other)) {}\n\n\n  /*\n   * Float constructor.\n   *\n   * Does not work as a template.\n   */\n  inline RubyObject(float other)   : rval(rb_float_new(other)) {}\n  inline RubyObject(double other)  : rval(rb_float_new(other)) {}\n\n  /*\n   * Operators for converting RubyObjects to other C types.\n   */\n\n#define RETURN_OBJ2NUM(mac)   if (this->rval == Qtrue) return 1; else if (this->rval == Qfalse) return 0; else return mac(this->rval);\n\n  inline operator int8_t()  const { RETURN_OBJ2NUM(NUM2INT)         }\n  inline operator uint8_t() const { RETURN_OBJ2NUM(NUM2UINT)        }\n  inline operator int16_t() const { RETURN_OBJ2NUM(NUM2INT)         }\n  inline operator uint16_t() const { RETURN_OBJ2NUM(NUM2UINT)       }\n  inline operator int32_t() const { RETURN_OBJ2NUM(NUM2LONG)        }\n  inline operator VALUE() const { return rval; }\n  //inline operator uint32_t() const { return NUM2ULONG(this->rval);      }\n  inline operator int64_t() const { RETURN_OBJ2NUM(NUM2LONG)        }\n  //inline operator uint64_t() const { RETURN_OBJ2NUM(NUM2ULONG)      }\n  inline operator double()   const { RETURN_OBJ2NUM(NUM2DBL)        }\n  inline operator float()  const { RETURN_OBJ2NUM(NUM2DBL)          }\n\n  inline operator Complex64() const { return this->to<Complex64>(); }\n  inline operator Complex128() const { return this->to<Complex128>(); }\n  /*\n   * Copy constructors.\n   */\n  inline 
RubyObject(const RubyObject& other) : rval(other.rval) {}\n\n  /*\n   * Inverse operator.\n   */\n  inline RubyObject inverse() const {\n    rb_raise(rb_eNotImpError, \"RubyObject#inverse needs to be implemented\");\n  }\n\n  /*\n   * Absolute value.\n   */\n  inline RubyObject abs() const {\n    return RubyObject(rb_funcall(this->rval, rb_intern(\"abs\"), 0));\n  }\n\n  /*\n   * Binary operator definitions.\n   */\n  \n  inline RubyObject operator+(const RubyObject& other) const {\n    return RubyObject(rb_funcall(this->rval, nm_rb_add, 1, other.rval));\n  }\n\n  inline RubyObject& operator+=(const RubyObject& other) {\n    this->rval = rb_funcall(this->rval, nm_rb_add, 1, other.rval);\n    return *this;\n  }\n\n  inline RubyObject operator-(const RubyObject& other) const {\n    return RubyObject(rb_funcall(this->rval, nm_rb_sub, 1, other.rval));\n  }\n\n  inline RubyObject& operator-=(const RubyObject& other) {\n    this->rval = rb_funcall(this->rval, nm_rb_sub, 1, other.rval);\n    return *this;\n  }\n  \n  inline RubyObject operator*(const RubyObject& other) const {\n    return RubyObject(rb_funcall(this->rval, nm_rb_mul, 1, other.rval));\n  }\n\n  inline RubyObject& operator*=(const RubyObject& other) {\n    this->rval = rb_funcall(this->rval, nm_rb_mul, 1, other.rval);\n    return *this;\n  }\n  \n  inline RubyObject operator/(const RubyObject& other) const {\n    return RubyObject(rb_funcall(this->rval, nm_rb_div, 1, other.rval));\n  }\n\n  inline RubyObject& operator/=(const RubyObject& other) {\n    this->rval = rb_funcall(this->rval, nm_rb_div, 1, other.rval);\n    return *this;\n  }\n  \n  inline RubyObject operator%(const RubyObject& other) const {\n    return RubyObject(rb_funcall(this->rval, nm_rb_percent, 1, other.rval));\n  }\n  \n  inline bool operator>(const RubyObject& other) const {\n    return rb_funcall(this->rval, nm_rb_gt, 1, other.rval) == Qtrue;\n  }\n  \n  inline bool operator<(const RubyObject& other) const {\n    return 
rb_funcall(this->rval, nm_rb_lt, 1, other.rval) == Qtrue;\n  }\n\n  template <typename OtherType>\n  inline bool operator<(const OtherType& other) const {\n    return *this < RubyObject(other);\n  }\n  \n  inline bool operator==(const RubyObject& other) const {\n    return rb_funcall(this->rval, nm_rb_eql, 1, other.rval) == Qtrue;\n  }\n\n  template <typename OtherType>\n  inline bool operator==(const OtherType& other) const {\n    return *this == RubyObject(other);\n  }\n  \n  inline bool operator!=(const RubyObject& other) const {\n    return rb_funcall(this->rval, nm_rb_neql, 1, other.rval) == Qtrue;\n  }\n\n  template <typename OtherType>\n  inline bool operator!=(const OtherType& other) const {\n    return *this != RubyObject(other);\n  }\n  \n  inline bool operator>=(const RubyObject& other) const {\n    return rb_funcall(this->rval, nm_rb_gte, 1, other.rval) == Qtrue;\n  }\n\n  template <typename OtherType>\n  inline bool operator>=(const OtherType& other) const {\n    return *this >= RubyObject(other);\n  }\n  \n  inline bool operator<=(const RubyObject& other) const {\n    return rb_funcall(this->rval, nm_rb_lte, 1, other.rval) == Qtrue;\n  }\n\n  template <typename OtherType>\n  inline bool operator<=(const OtherType& other) const {\n    return *this <= RubyObject(other);\n  }\n\n  ////////////////////////////\n  // RUBY-NATIVE OPERATIONS //\n  ////////////////////////////\n/*\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator==(const NativeType& other) const {\n    return *this == RubyObject(other);\n  }\n\n  template <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\n  inline bool operator!=(const NativeType& other) const {\n    return *this != RubyObject(other);\n  }\n*/\n  //////////////////////////////\n  // RUBY-COMPLEX OPERATIONS //\n  //////////////////////////////\n\n  template <typename FloatType, typename 
= typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  inline bool operator==(const Complex<FloatType>& other) const {\n    return *this == RubyObject(other);\n  }\n\n  template <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\n  inline bool operator!=(const Complex<FloatType>& other) const {\n    return *this != RubyObject(other);\n  }\n\n  /*\n   * Convert a Ruby object to an integer.\n   */\n  template <typename IntType>\n  inline typename std::enable_if<std::is_integral<IntType>::value, IntType>::type to(void) {\n    return NUM2INT(this->rval);\n  }\n  \n  /*\n   * Convert a Ruby object to a floating point number.\n   */\n  template <typename FloatType>\n  inline typename std::enable_if<std::is_floating_point<FloatType>::value, FloatType>::type to(void) {\n    return NUM2DBL(this->rval);\n  }\n  \n  /*\n   * Convert a Ruby object to a complex number.\n   */\n  template <typename ComplexType>\n  inline typename std::enable_if<made_from_same_template<ComplexType, Complex64>::value, ComplexType>::type to(void) const {\n    if (FIXNUM_P(this->rval) or TYPE(this->rval) == T_FLOAT) {\n      return ComplexType(NUM2DBL(this->rval));\n      \n    } else if (TYPE(this->rval) == T_COMPLEX) {\n      return ComplexType(NUM2DBL(rb_funcall(this->rval, nm_rb_real, 0)), NUM2DBL(rb_funcall(this->rval, nm_rb_imag, 0)));\n      \n    } else {\n      rb_raise(rb_eTypeError, \"Invalid conversion to Complex type.\");\n    }\n  }\n};\n  \n// Negative operator\ninline RubyObject operator-(const RubyObject& rhs) {\n  return RubyObject(rb_funcall(rhs.rval, nm_rb_negate, 0));\n}\n\n\n////////////////////////////\n// NATIVE-RUBY OPERATIONS //\n////////////////////////////\n\ntemplate <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline RubyObject operator/(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) / right;\n}\n\ntemplate 
<typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator==(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) == right;\n}\n\ntemplate <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator!=(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) != right;\n}\n\ntemplate <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator<=(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) <= right;\n}\n\ntemplate <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator>=(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) >= right;\n}\n\ntemplate <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator<(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) < right;\n}\n\ntemplate <typename NativeType, typename = typename std::enable_if<std::is_arithmetic<NativeType>::value>::type>\ninline bool operator>(const NativeType left, const RubyObject& right) {\n  return RubyObject(left) > right;\n}\n\n\n/////////////////////////////\n// COMPLEX-RUBY OPERATIONS //\n/////////////////////////////\n\ntemplate <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\ninline bool operator==(const Complex<FloatType>& left, const RubyObject& right) {\n  return RubyObject(left) == right;\n}\n\ntemplate <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\ninline bool operator!=(const Complex<FloatType>& left, const RubyObject& right) {\n  return RubyObject(left) != right;\n}\n\ntemplate <typename FloatType, typename = typename 
std::enable_if<std::is_floating_point<FloatType>::value>::type>\ninline bool operator<=(const Complex<FloatType>& left, const RubyObject& right) {\n  return RubyObject(left) <= right;\n}\n\ntemplate <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\ninline bool operator>=(const Complex<FloatType>& left, const RubyObject& right) {\n  return RubyObject(left) >= right;\n}\n\ntemplate <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\ninline bool operator<(const Complex<FloatType>& left, const RubyObject& right) {\n  return RubyObject(left) < right;\n}\n\ntemplate <typename FloatType, typename = typename std::enable_if<std::is_floating_point<FloatType>::value>::type>\ninline bool operator>(const Complex<FloatType>& left, const RubyObject& right) {\n  return RubyObject(left) > right;\n}\n\n} // end of namespace nm\n\nnamespace std {\n  inline nm::RubyObject abs(const nm::RubyObject& obj) {\n    return obj.abs();\n  }\n\n\n  inline nm::RubyObject sqrt(const nm::RubyObject& obj) {\n    VALUE cMath = rb_const_get(rb_cObject, rb_intern(\"Math\"));\n    return nm::RubyObject(rb_funcall(cMath, rb_intern(\"sqrt\"), 1, obj.rval));\n  }\n}\n\n#endif // RUBY_OBJECT_H\n"
  },
  {
    "path": "ext/nmatrix/extconf.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == extconf.rb\n#\n# This file checks for ATLAS and other necessary headers, and\n# generates a Makefile for compiling NMatrix.\n\nrequire File.expand_path(\"../../../lib/nmatrix/mkmf\", __FILE__)\n\n$INSTALLFILES = [\n  ['nmatrix.h'       , '$(archdir)'], \n  ['nmatrix.hpp'     , '$(archdir)'],\n  ['nmatrix_config.h', '$(archdir)'], \n  ['nm_memory.h'     , '$(archdir)'],\n  ['ruby_constants.h', '$(archdir)']\n]\n\nif /cygwin|mingw/ =~ RUBY_PLATFORM\n  $INSTALLFILES << ['libnmatrix.a', '$(archdir)']\nend\n\n$DEBUG = true\n$CFLAGS = [\"-Wall -Werror=return-type\",$CFLAGS].join(\" \")\n$CXXFLAGS = [\"-Wall -Werror=return-type\",$CXXFLAGS].join(\" \")\n$CPPFLAGS = [\"-Wall -Werror=return-type\",$CPPFLAGS].join(\" \")\n\n# When adding objects here, make sure their directories are included in CLEANOBJS down at the bottom of extconf.rb.\nbasenames = %w{nmatrix ruby_constants data/data util/io math util/sl_list storage/common storage/storage storage/dense/dense storage/yale/yale storage/list/list}\n$objs = basenames.map { |b| \"#{b}.o\"   }\n$srcs = basenames.map { |b| \"#{b}.cpp\" }\n\n#$libs += \" -lprofiler \"\n\ncreate_conf_h(\"nmatrix_config.h\")\ncreate_makefile(\"nmatrix\")\n\nDir.mkdir(\"data\") unless Dir.exists?(\"data\")\nDir.mkdir(\"util\") unless 
Dir.exists?(\"util\")\nDir.mkdir(\"storage\") unless Dir.exists?(\"storage\")\nDir.chdir(\"storage\") do\n  Dir.mkdir(\"yale\")  unless Dir.exists?(\"yale\")\n  Dir.mkdir(\"list\")  unless Dir.exists?(\"list\")\n  Dir.mkdir(\"dense\") unless Dir.exists?(\"dense\")\nend\n\n# to clean up object files in subdirectories:\nopen('Makefile', 'a') do |f|\n  clean_objs_paths = %w{data storage storage/dense storage/yale storage/list util}.map { |d| \"#{d}/*.#{CONFIG[\"OBJEXT\"]}\" }\n  f.write(\"CLEANOBJS := $(CLEANOBJS) #{clean_objs_paths.join(' ')}\")\nend\n"
  },
  {
    "path": "ext/nmatrix/math/asum.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == asum.h\n//\n// CBLAS asum function\n//\n\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef ASUM_H\n#define ASUM_H\n\n\n#include \"math/magnitude.h\"\n\nnamespace nm { namespace math {\n\n/*\n * Level 1 BLAS routine which sums the absolute values of a vector's contents. If the vector consists of complex values,\n * the routine sums the absolute values of the real and imaginary components as well.\n *\n * So, based on input types, these are the valid return types:\n *    int -> int\n *    float -> float or double\n *    double -> double\n *    complex64 -> float or double\n *    complex128 -> double\n */\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline MDType asum(const int N, const DType* X, const int incX) {\n  MDType sum = 0;\n  if ((N > 0) && (incX > 0)) {\n    for (int i = 0; i < N; ++i) {\n      sum += magnitude(X[i*incX]);\n    }\n  }\n  return sum;\n}\n\n\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline void cblas_asum(const int N, const void* X, const int incX, void* sum) {\n  *reinterpret_cast<MDType*>( sum ) = asum<DType,MDType>( N, reinterpret_cast<const DType*>(X), incX );\n}\n\n\n\n}} // end of namespace nm::math\n\n#endif // ASUM_H\n"
  },
  {
    "path": "ext/nmatrix/math/cblas_enums.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2015, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2015, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == cblas_enums.h\n//\n// CBLAS definitions for when CBLAS is not available.\n//\n\n#ifndef CBLAS_ENUM_DEFINED_H\n#define CBLAS_ENUM_DEFINED_H\nenum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};\nenum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};\nenum CBLAS_UPLO {CblasUpper=121, CblasLower=122};\nenum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};\nenum CBLAS_SIDE {CblasLeft=141, CblasRight=142};\n#endif\n"
  },
  {
    "path": "ext/nmatrix/math/cblas_templates_core.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == cblas_templates_core.h\n//\n// This header files is not used by the main nmatrix gem but has to be stored\n// in this directory so that it can be shared between nmatrix-atlas and\n// nmatrix-lapack.\n//\n\n//This is not a normal header file so we don't use an include guard.\n//See ext/nmatrix_atlas/math_atlas/cblas_templates_atlas.h for how\n//to use.\n\n//Below are the BLAS functions for which we have internal implementations.\n//The internal implementations are defined in the ext/nmatrix/math directory\n//and are the non-specialized\n//forms of the template functions nm::math::whatever().\n//They are are called below for non-BLAS\n//types in the non-specialized form of the template nm::math::something_else::whatever().\n//The specialized forms call the appropriate cblas functions.\n\n//For all functions besides herk, we also define the cblas_whatever() template\n//functions below, which just cast\n//their arguments to the appropriate types.\n\n//rotg\ntemplate <typename DType>\ninline void rotg(DType* a, DType* b, DType* c, DType* s) {\n  nm::math::rotg(a, b, c, s);\n}\n\ntemplate <>\ninline void rotg(float* a, float* b, float* c, float* s) {\n  cblas_srotg(a, b, c, s);\n}\n\ntemplate 
<>\ninline void rotg(double* a, double* b, double* c, double* s) {\n  cblas_drotg(a, b, c, s);\n}\n\n//Complex versions of rot and rotg are available in the ATLAS (and Intel)\n//version of CBLAS, but not part\n//of the reference implementation or OpenBLAS, so we omit them here\n//and fall back to the generic internal implementation.\n//Another options would be to directly call the fortran functions, e.g. ZROTG,\n//which for some reason are a part of the standard.\n//We can still define complex specializations of these functions in an ATLAS-specific\n//header.\n\ntemplate <typename DType>\ninline void cblas_rotg(void* a, void* b, void* c, void* s) {\n  rotg<DType>(static_cast<DType*>(a), static_cast<DType*>(b), static_cast<DType*>(c), static_cast<DType*>(s));\n}\n\n//rot\ntemplate <typename DType, typename CSDType>\ninline void rot(const int N, DType* X, const int incX, DType* Y, const int incY, const CSDType c, const CSDType s) {\n  nm::math::rot<DType,CSDType>(N, X, incX, Y, incY, c, s);\n}\n\ntemplate <>\ninline void rot(const int N, float* X, const int incX, float* Y, const int incY, const float c, const float s) {\n  cblas_srot(N, X, incX, Y, incY, (float)c, (float)s);\n}\n\ntemplate <>\ninline void rot(const int N, double* X, const int incX, double* Y, const int incY, const double c, const double s) {\n  cblas_drot(N, X, incX, Y, incY, c, s);\n}\n\ntemplate <typename DType, typename CSDType>\ninline void cblas_rot(const int N, void* X, const int incX, void* Y, const int incY, const void* c, const void* s) {\n  rot<DType,CSDType>(N, static_cast<DType*>(X), incX, static_cast<DType*>(Y), incY,\n                       *static_cast<const CSDType*>(c), *static_cast<const CSDType*>(s));\n}\n\n/*\n * Level 1 BLAS routine which sums the absolute values of a vector's contents. 
If the vector consists of complex values,\n * the routine sums the absolute values of the real and imaginary components as well.\n *\n * So, based on input types, these are the valid return types:\n *    int -> int\n *    float -> float or double\n *    double -> double\n *    complex64 -> float or double\n *    complex128 -> double\n */\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline MDType asum(const int N, const DType* X, const int incX) {\n  return nm::math::asum<DType,MDType>(N,X,incX);\n}\n\n\ntemplate <>\ninline float asum(const int N, const float* X, const int incX) {\n  return cblas_sasum(N, X, incX);\n}\n\ntemplate <>\ninline double asum(const int N, const double* X, const int incX) {\n  return cblas_dasum(N, X, incX);\n}\n\ntemplate <>\ninline float asum(const int N, const Complex64* X, const int incX) {\n  return cblas_scasum(N, X, incX);\n}\n\ntemplate <>\ninline double asum(const int N, const Complex128* X, const int incX) {\n  return cblas_dzasum(N, X, incX);\n}\n\n\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline void cblas_asum(const int N, const void* X, const int incX, void* sum) {\n  *static_cast<MDType*>( sum ) = asum<DType, MDType>( N, static_cast<const DType*>(X), incX );\n}\n\n/*\n * Level 1 BLAS routine which returns the 2-norm of an n-vector x.\n *\n * Based on input types, these are the valid return types:\n *    int -> int\n *    float -> float or double\n *    double -> double\n *    complex64 -> float or double\n *    complex128 -> double\n */\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline MDType nrm2(const int N, const DType* X, const int incX) {\n  return nm::math::nrm2<DType,MDType>(N, X, incX);\n}\n\n\ntemplate <>\ninline float nrm2(const int N, const float* X, const int incX) {\n  return cblas_snrm2(N, X, incX);\n}\n\ntemplate <>\ninline double nrm2(const int N, const double* X, const int incX) {\n  
return cblas_dnrm2(N, X, incX);\n}\n\ntemplate <>\ninline float nrm2(const int N, const Complex64* X, const int incX) {\n  return cblas_scnrm2(N, X, incX);\n}\n\ntemplate <>\ninline double nrm2(const int N, const Complex128* X, const int incX) {\n  return cblas_dznrm2(N, X, incX);\n}\n\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline void cblas_nrm2(const int N, const void* X, const int incX, void* result) {\n  *static_cast<MDType*>( result ) = nrm2<DType, MDType>( N, static_cast<const DType*>(X), incX );\n}\n\n//imax\ntemplate<typename DType>\ninline int imax(const int n, const DType *x, const int incx) {\n  return nm::math::imax(n, x, incx);\n}\n\ntemplate<>\ninline int imax(const int n, const float* x, const int incx) {\n  return cblas_isamax(n, x, incx);\n}\n\ntemplate<>\ninline int imax(const int n, const double* x, const int incx) {\n  return cblas_idamax(n, x, incx);\n}\n\ntemplate<>\ninline int imax(const int n, const Complex64* x, const int incx) {\n  return cblas_icamax(n, x, incx);\n}\n\ntemplate <>\ninline int imax(const int n, const Complex128* x, const int incx) {\n  return cblas_izamax(n, x, incx);\n}\n\ntemplate<typename DType>\ninline int cblas_imax(const int n, const void* x, const int incx) {\n  return imax<DType>(n, static_cast<const DType*>(x), incx);\n}\n\n//scal\ntemplate <typename DType>\ninline void scal(const int n, const DType scalar, DType* x, const int incx) {\n  nm::math::scal(n, scalar, x, incx);\n}\n\ntemplate <>\ninline void scal(const int n, const float scalar, float* x, const int incx) {\n  cblas_sscal(n, scalar, x, incx);\n}\n\ntemplate <>\ninline void scal(const int n, const double scalar, double* x, const int incx) {\n  cblas_dscal(n, scalar, x, incx);\n}\n\ntemplate <>\ninline void scal(const int n, const Complex64 scalar, Complex64* x, const int incx) {\n  cblas_cscal(n, &scalar, x, incx);\n}\n\ntemplate <>\ninline void scal(const int n, const Complex128 scalar, Complex128* x, const 
int incx) {\n  cblas_zscal(n, &scalar, x, incx);\n}\n\ntemplate <typename DType>\ninline void cblas_scal(const int n, const void* scalar, void* x, const int incx) {\n  scal<DType>(n, *static_cast<const DType*>(scalar), static_cast<DType*>(x), incx);\n}\n\n//gemv\ntemplate <typename DType>\ninline bool gemv(const enum CBLAS_TRANSPOSE Trans, const int M, const int N, const DType* alpha, const DType* A, const int lda,\n          const DType* X, const int incX, const DType* beta, DType* Y, const int incY) {\n  return nm::math::gemv(Trans, M, N, alpha, A, lda, X, incX, beta, Y, incY);\n}\n\ntemplate <>\ninline bool gemv(const enum CBLAS_TRANSPOSE Trans, const int M, const int N, const float* alpha, const float* A, const int lda,\n          const float* X, const int incX, const float* beta, float* Y, const int incY) {\n  cblas_sgemv(CblasRowMajor, Trans, M, N, *alpha, A, lda, X, incX, *beta, Y, incY);\n  return true;\n}\n\ntemplate <>\ninline bool gemv(const enum CBLAS_TRANSPOSE Trans, const int M, const int N, const double* alpha, const double* A, const int lda,\n          const double* X, const int incX, const double* beta, double* Y, const int incY) {\n  cblas_dgemv(CblasRowMajor, Trans, M, N, *alpha, A, lda, X, incX, *beta, Y, incY);\n  return true;\n}\n\ntemplate <>\ninline bool gemv(const enum CBLAS_TRANSPOSE Trans, const int M, const int N, const Complex64* alpha, const Complex64* A, const int lda,\n          const Complex64* X, const int incX, const Complex64* beta, Complex64* Y, const int incY) {\n  cblas_cgemv(CblasRowMajor, Trans, M, N, alpha, A, lda, X, incX, beta, Y, incY);\n  return true;\n}\n\ntemplate <>\ninline bool gemv(const enum CBLAS_TRANSPOSE Trans, const int M, const int N, const Complex128* alpha, const Complex128* A, const int lda,\n          const Complex128* X, const int incX, const Complex128* beta, Complex128* Y, const int incY) {\n  cblas_zgemv(CblasRowMajor, Trans, M, N, alpha, A, lda, X, incX, beta, Y, incY);\n  return true;\n}\n\ntemplate 
<typename DType>\ninline static bool cblas_gemv(const enum CBLAS_TRANSPOSE trans,\n                              const int m, const int n,\n                              const void* alpha,\n                              const void* a, const int lda,\n                              const void* x, const int incx,\n                              const void* beta,\n                              void* y, const int incy)\n{\n  return gemv<DType>(trans,\n                     m, n, static_cast<const DType*>(alpha),\n                     static_cast<const DType*>(a), lda,\n                     static_cast<const DType*>(x), incx, static_cast<const DType*>(beta),\n                     static_cast<DType*>(y), incy);\n}\n\n//gemm\ntemplate <typename DType>\ninline void gemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n                 const DType* alpha, const DType* A, const int lda, const DType* B, const int ldb, const DType* beta, DType* C, const int ldc)\n{\n  nm::math::gemm(Order, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);\n}\n\ntemplate <>\ninline void gemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n          const float* alpha, const float* A, const int lda, const float* B, const int ldb, const float* beta, float* C, const int ldc) {\n  cblas_sgemm(Order, TransA, TransB, M, N, K, *alpha, A, lda, B, ldb, *beta, C, ldc);\n}\n\ntemplate <>\ninline void gemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n          const double* alpha, const double* A, const int lda, const double* B, const int ldb, const double* beta, double* C, const int ldc) {\n  cblas_dgemm(Order, TransA, TransB, M, N, K, *alpha, A, lda, B, ldb, *beta, C, ldc);\n}\n\ntemplate <>\ninline void gemm(const enum 
CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n          const Complex64* alpha, const Complex64* A, const int lda, const Complex64* B, const int ldb, const Complex64* beta, Complex64* C, const int ldc) {\n  cblas_cgemm(Order, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);\n}\n\ntemplate <>\ninline void gemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n          const Complex128* alpha, const Complex128* A, const int lda, const Complex128* B, const int ldb, const Complex128* beta, Complex128* C, const int ldc) {\n  cblas_zgemm(Order, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);\n}\n\ntemplate <typename DType>\ninline static void cblas_gemm(const enum CBLAS_ORDER order,\n                              const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,\n                              int m, int n, int k,\n                              void* alpha,\n                              void* a, int lda,\n                              void* b, int ldb,\n                              void* beta,\n                              void* c, int ldc)\n{\n  gemm<DType>(order, trans_a, trans_b, m, n, k, static_cast<DType*>(alpha),\n              static_cast<DType*>(a), lda,\n              static_cast<DType*>(b), ldb, static_cast<DType*>(beta),\n              static_cast<DType*>(c), ldc);\n}\n\n//trsm\ntemplate <typename DType, typename = typename std::enable_if<!std::is_integral<DType>::value>::type>\ninline void trsm(const enum CBLAS_ORDER order,\n                 const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                 const int m, const int n, const DType alpha, const DType* a,\n                 const int lda, DType* b, const int ldb)\n{\n  
nm::math::trsm(order, side, uplo, trans_a, diag, m, n, alpha, a, lda, b, ldb);\n}\n\ntemplate <>\ninline void trsm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                 const int m, const int n, const float alpha, const float* a,\n                 const int lda, float* b, const int ldb)\n{\n  cblas_strsm(order, side, uplo, trans_a, diag, m, n, alpha, a, lda, b, ldb);\n}\n\ntemplate <>\ninline void trsm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                 const int m, const int n, const double alpha, const double* a,\n                 const int lda, double* b, const int ldb)\n{\n  cblas_dtrsm(order, side, uplo, trans_a, diag, m, n, alpha, a, lda, b, ldb);\n}\n\n\ntemplate <>\ninline void trsm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                 const int m, const int n, const Complex64 alpha, const Complex64* a,\n                 const int lda, Complex64* b, const int ldb)\n{\n  cblas_ctrsm(order, side, uplo, trans_a, diag, m, n, &alpha, a, lda, b, ldb);\n}\n\ntemplate <>\ninline void trsm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                 const int m, const int n, const Complex128 alpha, const Complex128* a,\n                 const int lda, Complex128* b, const int ldb)\n{\n  cblas_ztrsm(order, side, uplo, trans_a, diag, m, n, &alpha, a, lda, b, ldb);\n}\n\ntemplate <typename DType>\ninline static void cblas_trsm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                               const enum 
CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                               const int m, const int n, const void* alpha, const void* a,\n                               const int lda, void* b, const int ldb)\n{\n  trsm<DType>(order, side, uplo, trans_a, diag, m, n, *static_cast<const DType*>(alpha),\n              static_cast<const DType*>(a), lda, static_cast<DType*>(b), ldb);\n}\n\n//Below are BLAS functions that we don't have an internal implementation for.\n//In this case the non-specialized form just raises an error.\n\n//syrk\ntemplate <typename DType>\ninline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const DType* alpha, const DType* A, const int lda, const DType* beta, DType* C, const int ldc) {\n  rb_raise(rb_eNotImpError, \"syrk not yet implemented for non-BLAS dtypes\");\n}\n\ntemplate <>\ninline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const float* alpha, const float* A, const int lda, const float* beta, float* C, const int ldc) {\n  cblas_ssyrk(Order, Uplo, Trans, N, K, *alpha, A, lda, *beta, C, ldc);\n}\n\ntemplate <>\ninline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const double* alpha, const double* A, const int lda, const double* beta, double* C, const int ldc) {\n  cblas_dsyrk(Order, Uplo, Trans, N, K, *alpha, A, lda, *beta, C, ldc);\n}\n\ntemplate <>\ninline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const Complex64* alpha, const Complex64* A, const int lda, const Complex64* beta, Complex64* C, const int ldc) {\n  cblas_csyrk(Order, Uplo, Trans, N, K, alpha, A, lda, beta, C, ldc);\n}\n\ntemplate <>\ninline void syrk(const enum 
CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const Complex128* alpha, const Complex128* A, const int lda, const Complex128* beta, Complex128* C, const int ldc) {\n  cblas_zsyrk(Order, Uplo, Trans, N, K, alpha, A, lda, beta, C, ldc);\n}\n\ntemplate <typename DType>\ninline static void cblas_syrk(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const enum CBLAS_TRANSPOSE trans,\n                              const int n, const int k, const void* alpha,\n                              const void* A, const int lda, const void* beta, void* C, const int ldc)\n{\n  syrk<DType>(order, uplo, trans, n, k, static_cast<const DType*>(alpha),\n              static_cast<const DType*>(A), lda, static_cast<const DType*>(beta), static_cast<DType*>(C), ldc);\n}\n\n//herk\ntemplate <typename DType>\ninline void herk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const DType* alpha, const DType* A, const int lda, const DType* beta, DType* C, const int ldc) {\n  rb_raise(rb_eNotImpError, \"herk not yet implemented for non-BLAS dtypes\");\n}\n\ntemplate <>\ninline void herk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const Complex64* alpha, const Complex64* A, const int lda, const Complex64* beta, Complex64* C, const int ldc) {\n  cblas_cherk(Order, Uplo, Trans, N, K, alpha->r, A, lda, beta->r, C, ldc);\n}\n\ntemplate <>\ninline void herk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,\n                 const int K, const Complex128* alpha, const Complex128* A, const int lda, const Complex128* beta, Complex128* C, const int ldc) {\n  cblas_zherk(Order, Uplo, Trans, N, K, alpha->r, A, lda, beta->r, C, ldc);\n}\n\n//trmm\ntemplate <typename DType>\ninline void 
trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const DType* alpha,\n                 const DType* A, const int lda, DType* B, const int ldb) {\n  rb_raise(rb_eNotImpError, \"trmm not yet implemented for non-BLAS dtypes\");\n}\n\ntemplate <>\ninline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const float* alpha,\n                 const float* A, const int lda, float* B, const int ldb) {\n  cblas_strmm(order, side, uplo, ta, diag, m, n, *alpha, A, lda, B, ldb);\n}\n\ntemplate <>\ninline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const double* alpha,\n                 const double* A, const int lda, double* B, const int ldb) {\n  cblas_dtrmm(order, side, uplo, ta, diag, m, n, *alpha, A, lda, B, ldb);\n}\n\ntemplate <>\ninline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const Complex64* alpha,\n                 const Complex64* A, const int lda, Complex64* B, const int ldb) {\n  cblas_ctrmm(order, side, uplo, ta, diag, m, n, alpha, A, lda, B, ldb);\n}\n\ntemplate <>\ninline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const Complex128* alpha,\n                 const Complex128* A, const int lda, Complex128* B, const int ldb) {\n  cblas_ztrmm(order, side, uplo, ta, diag, m, n, alpha, A, lda, B, ldb);\n}\n\ntemplate 
<typename DType>\ninline static void cblas_trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                              const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const void* alpha,\n                              const void* A, const int lda, void* B, const int ldb)\n{\n  trmm<DType>(order, side, uplo, ta, diag, m, n, static_cast<const DType*>(alpha),\n              static_cast<const DType*>(A), lda, static_cast<DType*>(B), ldb);\n}\n"
  },
  {
    "path": "ext/nmatrix/math/gemm.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == gemm.h\n//\n// Header file for interface with ATLAS's CBLAS gemm functions and\n// native templated version of LAPACK's gemm function.\n//\n\n#ifndef GEMM_H\n# define GEMM_H\n\n#include \"cblas_enums.h\"\n#include \"math/long_dtype.h\"\n\nnamespace nm { namespace math {\n/*\n * GEneral Matrix Multiplication: based on dgemm.f from Netlib.\n *\n * This is an extremely inefficient algorithm. Recommend using ATLAS' version instead.\n *\n * Template parameters: LT -- long version of type T. Type T is the matrix dtype.\n *\n * This version throws no errors. 
Use gemm<DType> instead for error checking.\n */\ntemplate <typename DType>\ninline void gemm_nothrow(const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n                 const DType* alpha, const DType* A, const int lda, const DType* B, const int ldb, const DType* beta, DType* C, const int ldc)\n{\n\n  typename LongDType<DType>::type temp;\n\n  // Quick return if possible\n  if (!M or !N or ((*alpha == 0 or !K) and *beta == 1)) return;\n\n  // For alpha = 0\n  if (*alpha == 0) {\n    if (*beta == 0) {\n      for (int j = 0; j < N; ++j)\n        for (int i = 0; i < M; ++i) {\n          C[i+j*ldc] = 0;\n        }\n    } else {\n      for (int j = 0; j < N; ++j)\n        for (int i = 0; i < M; ++i) {\n          C[i+j*ldc] *= *beta;\n        }\n    }\n    return;\n  }\n\n  // Start the operations\n  if (TransB == CblasNoTrans) {\n    if (TransA == CblasNoTrans) {\n      // C = alpha*A*B+beta*C\n      for (int j = 0; j < N; ++j) {\n        if (*beta == 0) {\n          for (int i = 0; i < M; ++i) {\n            C[i+j*ldc] = 0;\n          }\n        } else if (*beta != 1) {\n          for (int i = 0; i < M; ++i) {\n            C[i+j*ldc] *= *beta;\n          }\n        }\n\n        for (int l = 0; l < K; ++l) {\n          if (B[l+j*ldb] != 0) {\n            temp = *alpha * B[l+j*ldb];\n            for (int i = 0; i < M; ++i) {\n              C[i+j*ldc] += A[i+l*lda] * temp;\n            }\n          }\n        }\n      }\n\n    } else {\n\n      // C = alpha*A**DType*B + beta*C\n      for (int j = 0; j < N; ++j) {\n        for (int i = 0; i < M; ++i) {\n          temp = 0;\n          for (int l = 0; l < K; ++l) {\n            temp += A[l+i*lda] * B[l+j*ldb];\n          }\n\n          if (*beta == 0) {\n            C[i+j*ldc] = *alpha*temp;\n          } else {\n            C[i+j*ldc] = *alpha*temp + *beta*C[i+j*ldc];\n          }\n        }\n      }\n\n    }\n\n  } else if (TransA == CblasNoTrans) {\n\n    // C = 
alpha*A*B**T + beta*C\n    for (int j = 0; j < N; ++j) {\n      if (*beta == 0) {\n        for (int i = 0; i < M; ++i) {\n          C[i+j*ldc] = 0;\n        }\n      } else if (*beta != 1) {\n        for (int i = 0; i < M; ++i) {\n          C[i+j*ldc] *= *beta;\n        }\n      }\n\n      for (int l = 0; l < K; ++l) {\n        if (B[j+l*ldb] != 0) {\n          temp = *alpha * B[j+l*ldb];\n          for (int i = 0; i < M; ++i) {\n            C[i+j*ldc] += A[i+l*lda] * temp;\n          }\n        }\n      }\n\n    }\n\n  } else {\n\n    // C = alpha*A**DType*B**T + beta*C\n    for (int j = 0; j < N; ++j) {\n      for (int i = 0; i < M; ++i) {\n        temp = 0;\n        for (int l = 0; l < K; ++l) {\n          temp += A[l+i*lda] * B[j+l*ldb];\n        }\n\n        if (*beta == 0) {\n          C[i+j*ldc] = *alpha*temp;\n        } else {\n          C[i+j*ldc] = *alpha*temp + *beta*C[i+j*ldc];\n        }\n      }\n    }\n\n  }\n\n  return;\n}\n\n\n\ntemplate <typename DType>\ninline void gemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,\n                 const DType* alpha, const DType* A, const int lda, const DType* B, const int ldb, const DType* beta, DType* C, const int ldc)\n{\n  if (Order == CblasRowMajor) {\n    if (TransA == CblasNoTrans) {\n      if (lda < std::max(K,1)) {\n        rb_raise(rb_eArgError, \"lda must be >= MAX(K,1): lda=%d K=%d\", lda, K);\n      }\n    } else {\n      if (lda < std::max(M,1)) { // && TransA == CblasTrans\n        rb_raise(rb_eArgError, \"lda must be >= MAX(M,1): lda=%d M=%d\", lda, M);\n      }\n    }\n\n    if (TransB == CblasNoTrans) {\n      if (ldb < std::max(N,1)) {\n        rb_raise(rb_eArgError, \"ldb must be >= MAX(N,1): ldb=%d N=%d\", ldb, N);\n      }\n    } else {\n      if (ldb < std::max(K,1)) {\n        rb_raise(rb_eArgError, \"ldb must be >= MAX(K,1): ldb=%d K=%d\", ldb, K);\n      }\n    }\n\n    if (ldc < std::max(N,1)) 
{\n      rb_raise(rb_eArgError, \"ldc must be >= MAX(N,1): ldc=%d N=%d\", ldc, N);\n    }\n  } else { // CblasColMajor\n    if (TransA == CblasNoTrans) {\n      if (lda < std::max(M,1)) {\n        rb_raise(rb_eArgError, \"lda must be >= MAX(M,1): lda=%d M=%d\", lda, M);\n      }\n    } else {\n      if (lda < std::max(K,1)) { // && TransA == CblasTrans\n        rb_raise(rb_eArgError, \"lda must be >= MAX(K,1): lda=%d K=%d\", lda, K);\n      }\n    }\n\n    if (TransB == CblasNoTrans) {\n      if (ldb < std::max(K,1)) {\n        rb_raise(rb_eArgError, \"ldb must be >= MAX(K,1): ldb=%d N=%d\", ldb, K);\n      }\n    } else {\n      if (ldb < std::max(N,1)) { // NOTE: This error message is actually wrong in the ATLAS source currently. Or are we wrong?\n        rb_raise(rb_eArgError, \"ldb must be >= MAX(N,1): ldb=%d N=%d\", ldb, N);\n      }\n    }\n\n    if (ldc < std::max(M,1)) {\n      rb_raise(rb_eArgError, \"ldc must be >= MAX(M,1): ldc=%d N=%d\", ldc, M);\n    }\n  }\n\n  /*\n   * Call SYRK when that's what the user is actually asking for; just handle beta=0, because beta=X requires\n   * we copy C and then subtract to preserve asymmetry.\n   */\n\n  if (A == B && M == N && TransA != TransB && lda == ldb && beta == 0) {\n    rb_raise(rb_eNotImpError, \"syrk and syreflect not implemented\");\n    /*syrk<DType>(CblasUpper, (Order == CblasColMajor) ? TransA : TransB, N, K, alpha, A, lda, beta, C, ldc);\n    syreflect(CblasUpper, N, C, ldc);\n    */\n  }\n\n  if (Order == CblasRowMajor)    gemm_nothrow<DType>(TransB, TransA, N, M, K, alpha, B, ldb, A, lda, beta, C, ldc);\n  else                           gemm_nothrow<DType>(TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);\n\n}\n\n\n}} // end of namespace nm::math\n\n#endif // GEMM_H\n"
  },
  {
    "path": "ext/nmatrix/math/gemv.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == gemv.h\n//\n// Header file for interface with ATLAS's CBLAS gemv functions and\n// native templated version of LAPACK's gemv function.\n//\n\n#ifndef GEMV_H\n# define GEMV_H\n\n#include \"math/long_dtype.h\"\n\nnamespace nm { namespace math {\n\n/*\n * GEneral Matrix-Vector multiplication: based on dgemv.f from Netlib.\n *\n * This is an extremely inefficient algorithm. Recommend using ATLAS' version instead.\n *\n * Template parameters: LT -- long version of type T. 
Type T is the matrix dtype.\n */\ntemplate <typename DType>\ninline bool gemv(const enum CBLAS_TRANSPOSE Trans, const int M, const int N, const DType* alpha, const DType* A, const int lda,\n          const DType* X, const int incX, const DType* beta, DType* Y, const int incY) {\n  int lenX, lenY, i, j;\n  int kx, ky, iy, jx, jy, ix;\n\n  typename LongDType<DType>::type temp;\n\n  // Test the input parameters\n  if (Trans < 111 || Trans > 113) {\n    rb_raise(rb_eArgError, \"GEMV: TransA must be CblasNoTrans, CblasTrans, or CblasConjTrans\");\n    return false;\n  } else if (lda < std::max(1, N)) {\n    fprintf(stderr, \"GEMV: N = %d; got lda=%d\", N, lda);\n    rb_raise(rb_eArgError, \"GEMV: Expected lda >= max(1, N)\");\n    return false;\n  } else if (incX == 0) {\n    rb_raise(rb_eArgError, \"GEMV: Expected incX != 0\\n\");\n    return false;\n  } else if (incY == 0) {\n    rb_raise(rb_eArgError, \"GEMV: Expected incY != 0\\n\");\n    return false;\n  }\n\n  // Quick return if possible\n  if (!M or !N or (*alpha == 0 and *beta == 1)) return true;\n\n  if (Trans == CblasNoTrans) {\n    lenX = N;\n    lenY = M;\n  } else {\n    lenX = M;\n    lenY = N;\n  }\n\n  if (incX > 0) kx = 0;\n  else          kx = (lenX - 1) * -incX;\n\n  if (incY > 0) ky = 0;\n  else          ky =  (lenY - 1) * -incY;\n\n  // Start the operations. 
In this version, the elements of A are accessed sequentially with one pass through A.\n  if (*beta != 1) {\n    if (incY == 1) {\n      if (*beta == 0) {\n        for (i = 0; i < lenY; ++i) {\n          Y[i] = 0;\n        }\n      } else {\n        for (i = 0; i < lenY; ++i) {\n          Y[i] *= *beta;\n        }\n      }\n    } else {\n      iy = ky;\n      if (*beta == 0) {\n        for (i = 0; i < lenY; ++i) {\n          Y[iy] = 0;\n          iy += incY;\n        }\n      } else {\n        for (i = 0; i < lenY; ++i) {\n          Y[iy] *= *beta;\n          iy += incY;\n        }\n      }\n    }\n  }\n\n  if (*alpha == 0) return false;\n\n  if (Trans == CblasNoTrans) {\n\n    // Form  y := alpha*A*x + y.\n    jx = kx;\n    if (incY == 1) {\n      for (j = 0; j < N; ++j) {\n        if (X[jx] != 0) {\n          temp = *alpha * X[jx];\n          for (i = 0; i < M; ++i) {\n            Y[i] += A[j+i*lda] * temp;\n          }\n        }\n        jx += incX;\n      }\n    } else {\n      for (j = 0; j < N; ++j) {\n        if (X[jx] != 0) {\n          temp = *alpha * X[jx];\n          iy = ky;\n          for (i = 0; i < M; ++i) {\n            Y[iy] += A[j+i*lda] * temp;\n            iy += incY;\n          }\n        }\n        jx += incX;\n      }\n    }\n\n  } else { // TODO: Check that indices are correct! They're switched for C.\n\n    // Form  y := alpha*A**DType*x + y.\n    jy = ky;\n\n    if (incX == 1) {\n      for (j = 0; j < N; ++j) {\n        temp = 0;\n        for (i = 0; i < M; ++i) {\n          temp += A[j+i*lda]*X[j];\n        }\n        Y[jy] += *alpha * temp;\n        jy += incY;\n      }\n    } else {\n      for (j = 0; j < N; ++j) {\n        temp = 0;\n        ix = kx;\n        for (i = 0; i < M; ++i) {\n          temp += A[j+i*lda] * X[ix];\n          ix += incX;\n        }\n\n        Y[jy] += *alpha * temp;\n        jy += incY;\n      }\n    }\n  }\n\n  return true;\n}  // end of GEMV\n\n\n}} // end of namespace nm::math\n\n#endif // GEMM_H\n"
  },
  {
    "path": "ext/nmatrix/math/getrf.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == getrf.h\n//\n// getrf function in native C++.\n//\n\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef GETRF_H\n#define GETRF_H\n\n#include \"math/laswp.h\"\n#include \"math/math.h\"\n#include \"math/trsm.h\"\n#include \"math/gemm.h\"\n#include \"math/imax.h\"\n#include \"math/scal.h\"\n\nnamespace nm { namespace math {\n\n/* Numeric inverse -- usually just 1 / f, but a little more complicated for complex. */\ntemplate <typename DType>\ninline DType numeric_inverse(const DType& n) {\n  return n.inverse();\n}\ntemplate <> inline float numeric_inverse(const float& n) { return 1 / n; }\ntemplate <> inline double numeric_inverse(const double& n) { return 1 / n; }\n\n/*\n * Templated version of row-order and column-order getrf, derived from ATL_getrfR.c (from ATLAS 3.8.0).\n *\n * 1. Row-major factorization of form\n *   A = L * U * P\n * where P is a column-permutation matrix, L is lower triangular (lower\n * trapazoidal if M > N), and U is upper triangular with unit diagonals (upper\n * trapazoidal if M < N).  This is the recursive Level 3 BLAS version.\n *\n * 2. Column-major factorization of form\n *   A = P * L * U\n * where P is a row-permutation matrix, L is lower triangular with unit diagonal\n * elements (lower trapazoidal if M > N), and U is upper triangular (upper\n * trapazoidal if M < N).  
This is the recursive Level 3 BLAS version.\n *\n * Template argument determines whether 1 or 2 is utilized.\n */\ntemplate <bool RowMajor, typename DType>\ninline int getrf_nothrow(const int M, const int N, DType* A, const int lda, int* ipiv) {\n  const int MN = std::min(M, N);\n  int ierr = 0;\n\n  // Symbols used by ATLAS in the several versions of this function:\n  // Row   Col      Us\n  // Nup   Nleft    N_ul\n  // Ndown Nright   N_dr\n  // We're going to use N_ul, N_dr\n\n  DType neg_one = -1, one = 1;\n\n  if (MN > 1) {\n    int N_ul = MN >> 1;\n\n    // FIXME: Figure out how ATLAS #defines NB\n#ifdef NB\n    if (N_ul > NB) N_ul = ATL_MulByNB(ATL_DivByNB(N_ul));\n#endif\n\n    int N_dr;\n    if (RowMajor) {\n      N_dr = M - N_ul;\n    } else {\n      N_dr = N - N_ul;\n    }\n\n    int i = RowMajor ? getrf_nothrow<true,DType>(N_ul, N, A, lda, ipiv) : getrf_nothrow<false,DType>(M, N_ul, A, lda, ipiv);\n\n    if (i) if (!ierr) ierr = i;\n\n    DType *Ar, *Ac, *An;\n    if (RowMajor) {\n      Ar = &(A[N_ul * lda]),\n      Ac = &(A[N_ul]);\n      An = &(Ar[N_ul]);\n\n      nm::math::laswp<DType>(N_dr, Ar, lda, 0, N_ul, ipiv, 1);\n\n      nm::math::trsm<DType>(CblasRowMajor, CblasRight, CblasUpper, CblasNoTrans, CblasUnit, N_dr, N_ul, one, A, lda, Ar, lda);\n      nm::math::gemm<DType>(CblasRowMajor, CblasNoTrans, CblasNoTrans, N_dr, N-N_ul, N_ul, &neg_one, Ar, lda, Ac, lda, &one, An, lda);\n\n      i = getrf_nothrow<true,DType>(N_dr, N-N_ul, An, lda, ipiv+N_ul);\n    } else {\n      Ar = NULL;\n      Ac = &(A[N_ul * lda]);\n      An = &(Ac[N_ul]);\n\n      nm::math::laswp<DType>(N_dr, Ac, lda, 0, N_ul, ipiv, 1);\n\n      nm::math::trsm<DType>(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasUnit, N_ul, N_dr, one, A, lda, Ac, lda);\n      nm::math::gemm<DType>(CblasColMajor, CblasNoTrans, CblasNoTrans, M-N_ul, N_dr, N_ul, &neg_one, &(A[N_ul]), lda, Ac, lda, &one, An, lda);\n\n      i = getrf_nothrow<false,DType>(M-N_ul, N_dr, An, lda, ipiv+N_ul);\n    
}\n\n    if (i) if (!ierr) ierr = N_ul + i;\n\n    for (i = N_ul; i != MN; i++) {\n      ipiv[i] += N_ul;\n    }\n\n    nm::math::laswp<DType>(N_ul, A, lda, N_ul, MN, ipiv, 1);  /* apply pivots */\n\n  } else if (MN == 1) { // there's another case for the colmajor version, but it doesn't seem to be necessary.\n\n    int i;\n    if (RowMajor) {\n      i = *ipiv = nm::math::imax<DType>(N, A, 1); // cblas_iamax(N, A, 1);\n    } else {\n      i = *ipiv = nm::math::imax<DType>(M, A, 1);\n    }\n\n    DType tmp = A[i];\n    if (tmp != 0) {\n\n      nm::math::scal<DType>((RowMajor ? N : M), nm::math::numeric_inverse(tmp), A, 1);\n      A[i] = *A;\n      *A   = tmp;\n\n    } else ierr = 1;\n\n  }\n  return(ierr);\n}\n\n\n/*\n * From ATLAS 3.8.0:\n *\n * Computes one of two LU factorizations based on the setting of the Order\n * parameter, as follows:\n * ----------------------------------------------------------------------------\n *                       Order == CblasColMajor\n * Column-major factorization of form\n *   A = P * L * U\n * where P is a row-permutation matrix, L is lower triangular with unit\n * diagonal elements (lower trapazoidal if M > N), and U is upper triangular\n * (upper trapazoidal if M < N).\n *\n * ----------------------------------------------------------------------------\n *                       Order == CblasRowMajor\n * Row-major factorization of form\n *   A = P * L * U\n * where P is a column-permutation matrix, L is lower triangular (lower\n * trapazoidal if M > N), and U is upper triangular with unit diagonals (upper\n * trapazoidal if M < N).\n *\n * ============================================================================\n * Let IERR be the return value of the function:\n *    If IERR == 0, successful exit.\n *    If (IERR < 0) the -IERR argument had an illegal value\n *    If (IERR > 0 && Order == CblasColMajor)\n *       U(i-1,i-1) is exactly zero.  
The factorization has been completed,\n *       but the factor U is exactly singular, and division by zero will\n *       occur if it is used to solve a system of equations.\n *    If (IERR > 0 && Order == CblasRowMajor)\n *       L(i-1,i-1) is exactly zero.  The factorization has been completed,\n *       but the factor L is exactly singular, and division by zero will\n *       occur if it is used to solve a system of equations.\n */\ntemplate <typename DType>\ninline int getrf(const enum CBLAS_ORDER Order, const int M, const int N, DType* A, int lda, int* ipiv) {\n  if (Order == CblasRowMajor) {\n    if (lda < std::max(1,N)) {\n      rb_raise(rb_eArgError, \"GETRF: lda must be >= MAX(N,1): lda=%d N=%d\", lda, N);\n      return -6;\n    }\n\n    return getrf_nothrow<true,DType>(M, N, A, lda, ipiv);\n  } else {\n    if (lda < std::max(1,M)) {\n      rb_raise(rb_eArgError, \"GETRF: lda must be >= MAX(M,1): lda=%d M=%d\", lda, M);\n      return -6;\n    }\n\n    return getrf_nothrow<false,DType>(M, N, A, lda, ipiv);\n    //rb_raise(rb_eNotImpError, \"column major getrf not implemented\");\n  }\n}\n\n\n\n/*\n* Function signature conversion for calling LAPACK's getrf functions as directly as possible.\n*\n* For documentation: http://www.netlib.org/lapack/double/dgetrf.f\n*\n* This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.\n*/\ntemplate <typename DType>\ninline int clapack_getrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, int* ipiv) {\n  return getrf<DType>(order, m, n, reinterpret_cast<DType*>(a), lda, ipiv);\n}\n\n\n} } // end nm::math\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/math/getrs.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == getrs.h\n//\n// getrs function in native C++.\n//\n\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef GETRS_H\n#define GETRS_H\n\nnamespace nm { namespace math {\n\n\n/*\n * Solves a system of linear equations A*X = B with a general NxN matrix A using the LU factorization computed by GETRF.\n *\n * From ATLAS 3.8.0.\n */\ntemplate <typename DType>\nint getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const DType* A,\n           const int lda, const int* ipiv, DType* B, const int ldb)\n{\n  // enum CBLAS_DIAG Lunit, Uunit; // These aren't used. 
Not sure why they're declared in ATLAS' src.\n\n  if (!N || !NRHS) return 0;\n\n  const DType ONE = 1;\n\n  if (Order == CblasColMajor) {\n    if (Trans == CblasNoTrans) {\n      nm::math::laswp<DType>(NRHS, B, ldb, 0, N, ipiv, 1);\n      nm::math::trsm<DType>(Order, CblasLeft, CblasLower, CblasNoTrans, CblasUnit, N, NRHS, ONE, A, lda, B, ldb);\n      nm::math::trsm<DType>(Order, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, N, NRHS, ONE, A, lda, B, ldb);\n    } else {\n      nm::math::trsm<DType>(Order, CblasLeft, CblasUpper, Trans, CblasNonUnit, N, NRHS, ONE, A, lda, B, ldb);\n      nm::math::trsm<DType>(Order, CblasLeft, CblasLower, Trans, CblasUnit, N, NRHS, ONE, A, lda, B, ldb);\n      nm::math::laswp<DType>(NRHS, B, ldb, 0, N, ipiv, -1);\n    }\n  } else {\n    if (Trans == CblasNoTrans) {\n      nm::math::trsm<DType>(Order, CblasRight, CblasLower, CblasTrans, CblasNonUnit, NRHS, N, ONE, A, lda, B, ldb);\n      nm::math::trsm<DType>(Order, CblasRight, CblasUpper, CblasTrans, CblasUnit, NRHS, N, ONE, A, lda, B, ldb);\n      nm::math::laswp<DType>(NRHS, B, ldb, 0, N, ipiv, -1);\n    } else {\n      nm::math::laswp<DType>(NRHS, B, ldb, 0, N, ipiv, 1);\n      nm::math::trsm<DType>(Order, CblasRight, CblasUpper, CblasNoTrans, CblasUnit, NRHS, N, ONE, A, lda, B, ldb);\n      nm::math::trsm<DType>(Order, CblasRight, CblasLower, CblasNoTrans, CblasNonUnit, NRHS, N, ONE, A, lda, B, ldb);\n    }\n  }\n  return 0;\n}\n\n\n/*\n* Function signature conversion for calling LAPACK's getrs functions as directly as possible.\n*\n* For documentation: http://www.netlib.org/lapack/double/dgetrs.f\n*\n* This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.\n*/\ntemplate <typename DType>\ninline int clapack_getrs(const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans, const int n, const int nrhs,\n                         const void* a, const int lda, const int* ipiv, void* b, const int ldb) {\n  return getrs<DType>(order, trans, 
n, nrhs, reinterpret_cast<const DType*>(a), lda, ipiv, reinterpret_cast<DType*>(b), ldb);\n}\n\n\n} } // end nm::math\n\n#endif // GETRS_H\n"
  },
  {
    "path": "ext/nmatrix/math/imax.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == imax.h\n//\n// BLAS level 1 function imax.\n//\n\n#ifndef IMAX_H\n#define IMAX_H\n\n#include \"math/magnitude.h\"\n\nnamespace nm { namespace math {\n\n\ntemplate<typename DType>\ninline int imax(const int n, const DType *x, const int incx) {\n\n  if (n < 1 || incx <= 0) {\n    return -1;\n  }\n  if (n == 1) {\n    return 0;\n  }\n\n  typename MagnitudeDType<DType>::type dmax;\n  int imax = 0;\n\n  if (incx == 1) { // if incrementing by 1\n\n    dmax = magnitude(x[0]);\n\n    for (int i = 1; i < n; ++i) {\n      if (magnitude(x[i]) > dmax) {\n        imax = i;\n        dmax = magnitude(x[i]);\n      }\n    }\n\n  } else { // if incrementing by more than 1\n\n    dmax = magnitude(x[0]);\n\n    for (int i = 1, ix = incx; i < n; ++i, ix += incx) {\n      if (magnitude(x[ix]) > dmax) {\n        imax = i;\n        dmax = magnitude(x[ix]);\n      }\n    }\n  }\n  return imax;\n}\n\ntemplate<typename DType>\ninline int cblas_imax(const int n, const void* x, const int incx) {\n  return imax<DType>(n, reinterpret_cast<const DType*>(x), incx);\n}\n\n}} // end of namespace nm::math\n\n#endif /* IMAX_H */\n"
  },
  {
    "path": "ext/nmatrix/math/laswp.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == laswp.h\n//\n// laswp function in native C++.\n//\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef LASWP_H\n#define LASWP_H\n\nnamespace nm { namespace math {\n\n\n/*\n * ATLAS function which performs row interchanges on a general rectangular matrix. Modeled after the LAPACK LASWP function.\n *\n * This version is templated for use by template <> getrf().\n */\ntemplate <typename DType>\ninline void laswp(const int N, DType* A, const int lda, const int K1, const int K2, const int *piv, const int inci) {\n  //const int n = K2 - K1; // not sure why this is declared. 
commented it out because it's unused.\n\n  int nb = N >> 5;\n\n  const int mr = N - (nb<<5);\n  const int incA = lda << 5;\n\n  if (K2 < K1) return;\n\n  int i1, i2;\n  if (inci < 0) {\n    piv -= (K2-1) * inci;\n    i1 = K2 - 1;\n    i2 = K1;\n  } else {\n    piv += K1 * inci;\n    i1 = K1;\n    i2 = K2-1;\n  }\n\n  if (nb) {\n\n    do {\n      const int* ipiv = piv;\n      int i           = i1;\n      int KeepOn;\n\n      do {\n        int ip = *ipiv; ipiv += inci;\n\n        if (ip != i) {\n          DType *a0 = &(A[i]),\n                *a1 = &(A[ip]);\n\n          for (int h = 32; h; h--) {\n            DType r   = *a0;\n            *a0       = *a1;\n            *a1       = r;\n\n            a0 += lda;\n            a1 += lda;\n          }\n\n        }\n        if (inci > 0) KeepOn = (++i <= i2);\n        else          KeepOn = (--i >= i2);\n\n      } while (KeepOn);\n      A += incA;\n    } while (--nb);\n  }\n\n  if (mr) {\n    const int* ipiv = piv;\n    int i           = i1;\n    int KeepOn;\n\n    do {\n      int ip = *ipiv; ipiv += inci;\n      if (ip != i) {\n        DType *a0 = &(A[i]),\n              *a1 = &(A[ip]);\n\n        for (int h = mr; h; h--) {\n          DType r   = *a0;\n          *a0       = *a1;\n          *a1       = r;\n\n          a0 += lda;\n          a1 += lda;\n        }\n      }\n\n      if (inci > 0) KeepOn = (++i <= i2);\n      else          KeepOn = (--i >= i2);\n\n    } while (KeepOn);\n  }\n}\n\n\n/*\n* Function signature conversion for calling LAPACK's laswp functions as directly as possible.\n*\n* For documentation: http://www.netlib.org/lapack/double/dlaswp.f\n*\n* This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.\n*/\ntemplate <typename DType>\ninline void clapack_laswp(const int n, void* a, const int lda, const int k1, const int k2, const int* ipiv, const int incx) {\n  laswp<DType>(n, reinterpret_cast<DType*>(a), lda, k1, k2, ipiv, incx);\n}\n\n} }  // namespace nm::math\n#endif 
// LASWP_H\n"
  },
  {
    "path": "ext/nmatrix/math/long_dtype.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == long_dtype.h\n//\n// Declarations necessary for the native versions of GEMM and GEMV,\n// as well as for IMAX.\n//\n\n#ifndef LONG_DTYPE_H\n#define LONG_DTYPE_H\n\nnamespace nm { namespace math {\n  // These allow an increase in precision for intermediate values of gemm and gemv.\n  // See also: http://stackoverflow.com/questions/11873694/how-does-one-increase-precision-in-c-templates-in-a-template-typename-dependen\n  template <typename DType> struct LongDType;\n  template <> struct LongDType<uint8_t> { typedef int16_t type; };\n  template <> struct LongDType<int8_t> { typedef int16_t type; };\n  template <> struct LongDType<int16_t> { typedef int32_t type; };\n  template <> struct LongDType<int32_t> { typedef int64_t type; };\n  template <> struct LongDType<int64_t> { typedef int64_t type; };\n  template <> struct LongDType<float> { typedef double type; };\n  template <> struct LongDType<double> { typedef double type; };\n  template <> struct LongDType<Complex64> { typedef Complex128 type; };\n  template <> struct LongDType<Complex128> { typedef Complex128 type; };\n  template <> struct LongDType<RubyObject> { typedef RubyObject type; };\n\n  template <typename DType> struct 
MagnitudeDType;\n  template <> struct MagnitudeDType<uint8_t> { typedef uint8_t type; };\n  template <> struct MagnitudeDType<int8_t> { typedef int8_t type; };\n  template <> struct MagnitudeDType<int16_t> { typedef int16_t type; };\n  template <> struct MagnitudeDType<int32_t> { typedef int32_t type; };\n  template <> struct MagnitudeDType<int64_t> { typedef int64_t type; };\n  template <> struct MagnitudeDType<float> { typedef float type; };\n  template <> struct MagnitudeDType<double> { typedef double type; };\n  template <> struct MagnitudeDType<Complex64> { typedef float type; };\n  template <> struct MagnitudeDType<Complex128> { typedef double type; };\n  template <> struct MagnitudeDType<RubyObject> { typedef RubyObject type; };\n  \n}} // end of namespace nm::math\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/math/magnitude.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == math/magnitude.h\n//\n// Takes the absolute value (meaning magnitude) of each DType.\n// Needed for a variety of BLAS/LAPACK functions.\n//\n\n#ifndef MAGNITUDE_H\n#define MAGNITUDE_H\n\n#include \"math/long_dtype.h\"\n\nnamespace nm { namespace math {\n\n/* Magnitude -- may be complicated for unsigned types, and need to call the correct STL abs for floats/doubles */ \ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline MDType magnitude(const DType& v) {\n  return v.abs();\n}\ntemplate <> inline float magnitude(const float& v) { return std::abs(v); }\ntemplate <> inline double magnitude(const double& v) { return std::abs(v); }\ntemplate <> inline uint8_t magnitude(const uint8_t& v) { return v; }\ntemplate <> inline int8_t magnitude(const int8_t& v) { return std::abs(v); }\ntemplate <> inline int16_t magnitude(const int16_t& v) { return std::abs(v); }\ntemplate <> inline int32_t magnitude(const int32_t& v) { return std::abs(v); }\ntemplate <> inline int64_t magnitude(const int64_t& v) { return std::abs(v); }\ntemplate <> inline float magnitude(const nm::Complex64& v) { return std::sqrt(v.r * v.r + v.i * v.i); }\ntemplate <> inline double magnitude(const 
nm::Complex128& v) { return std::sqrt(v.r * v.r + v.i * v.i); } \n    \n}}\n\n#endif // MAGNITUDE_H\n"
  },
  {
    "path": "ext/nmatrix/math/math.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == math.h\n//\n// Header file for math functions, interfacing with BLAS, etc.\n//\n// For instructions on adding CBLAS and CLAPACK functions, see the\n// beginning of math.cpp.\n//\n// Some of these functions are from ATLAS. Here is the license for\n// ATLAS:\n//\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. 
The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef MATH_H\n#define MATH_H\n\n/*\n * Standard Includes\n */\n\n#include \"cblas_enums.h\"\n\n#include <algorithm> // std::min, std::max\n#include <limits> // std::numeric_limits\n#include <memory> // std::unique_ptr\n\n/*\n * Project Includes\n */\n\n/*\n * Macros\n */\n#define REAL_RECURSE_LIMIT 4\n\n/*\n * Data\n */\n\n\nextern \"C\" {\n  /*\n   * C accessors.\n   */\n\n  void nm_math_transpose_generic(const size_t M, const size_t N, const void* A, const int lda, void* B, const int ldb, size_t element_size);\n  void nm_math_init_blas(void);\n\n  /*\n   * Pure math implementations.\n   */  \n  void nm_math_solve(VALUE lu, VALUE b, VALUE x, VALUE ipiv);\n  void nm_math_inverse(const int M, void* A_elements, nm::dtype_t dtype);\n  void nm_math_hessenberg(VALUE a);\n  void nm_math_det_exact_from_dense(const int M, const void* elements, \n      const int lda, nm::dtype_t dtype, void* result);\n  void nm_math_det_exact_from_yale(const int M, const YALE_STORAGE* storage, \n      
const int lda, nm::dtype_t dtype, void* result);\n  void nm_math_inverse_exact_from_dense(const int M, const void* A_elements, \n      const int lda, void* B_elements, const int ldb, nm::dtype_t dtype);\n  void nm_math_inverse_exact_from_yale(const int M, const YALE_STORAGE* storage, \n      const int lda, YALE_STORAGE* inverse, const int ldb, nm::dtype_t dtype);\n}\n\n\nnamespace nm {\n  namespace math {\n\n/*\n * Types\n */\n\n\n/*\n * Functions\n */\n\n// Yale: numeric matrix multiply c=a*b\ntemplate <typename DType>\ninline void numbmm(const unsigned int n, const unsigned int m, const unsigned int l, const IType* ia, const IType* ja, const DType* a, const bool diaga,\n            const IType* ib, const IType* jb, const DType* b, const bool diagb, IType* ic, IType* jc, DType* c, const bool diagc) {\n  const unsigned int max_lmn = std::max(std::max(m, n), l);\n  std::unique_ptr<IType[]> next(new IType[max_lmn]);\n  std::unique_ptr<DType[]> sums(new DType[max_lmn]);\n\n  DType v;\n\n  IType head, length, temp, ndnz = 0;\n  IType minmn = std::min(m,n);\n  IType minlm = std::min(l,m);\n\n  for (IType idx = 0; idx < max_lmn; ++idx) { // initialize scratch arrays\n    next[idx] = std::numeric_limits<IType>::max();\n    sums[idx] = 0;\n  }\n\n  for (IType i = 0; i < n; ++i) { // walk down the rows\n    head = std::numeric_limits<IType>::max()-1; // head gets assigned as whichever column of B's row j we last visited\n    length = 0;\n\n    for (IType jj = ia[i]; jj <= ia[i+1]; ++jj) { // walk through entries in each row\n      IType j;\n\n      if (jj == ia[i+1]) { // if we're in the last entry for this row:\n        if (!diaga || i >= minmn) continue;\n        j   = i;      // if it's a new Yale matrix, and last entry, get the diagonal position (j) and entry (ajj)\n        v   = a[i];\n      } else {\n        j   = ja[jj]; // if it's not the last entry for this row, get the column (j) and entry (ajj)\n        v   = a[jj];\n      }\n\n      for (IType kk = ib[j]; kk <= 
ib[j+1]; ++kk) {\n\n        IType k;\n\n        if (kk == ib[j+1]) { // Get the column id for that entry\n          if (!diagb || j >= minlm) continue;\n          k  = j;\n          sums[k] += v*b[k];\n        } else {\n          k  = jb[kk];\n          sums[k] += v*b[kk];\n        }\n\n        if (next[k] == std::numeric_limits<IType>::max()) {\n          next[k] = head;\n          head    = k;\n          ++length;\n        }\n      } // end of kk loop\n    } // end of jj loop\n\n    for (IType jj = 0; jj < length; ++jj) {\n      if (sums[head] != 0) {\n        if (diagc && head == i) {\n          c[head] = sums[head];\n        } else {\n          jc[n+1+ndnz] = head;\n          c[n+1+ndnz]  = sums[head];\n          ++ndnz;\n        }\n      }\n\n      temp = head;\n      head = next[head];\n\n      next[temp] = std::numeric_limits<IType>::max();\n      sums[temp] = 0;\n    }\n\n    ic[i+1] = n+1+ndnz;\n  }\n} /* numbmm_ */\n\n\n/*\ntemplate <typename DType, typename IType>\ninline void new_yale_matrix_multiply(const unsigned int m, const IType* ija, const DType* a, const IType* ijb, const DType* b, YALE_STORAGE* c_storage) {\n  unsigned int n = c_storage->shape[0],\n               l = c_storage->shape[1];\n\n  // Create a working vector of dimension max(m,l,n) and initial value IType::max():\n  std::vector<IType> mask(std::max(std::max(m,l),n), std::numeric_limits<IType>::max());\n\n  for (IType i = 0; i < n; ++i) { // A.rows.each_index do |i|\n\n    IType j, k;\n    size_t ndnz;\n\n    for (IType jj = ija[i]; jj <= ija[i+1]; ++jj) { // walk through column pointers for row i of A\n      j = (jj == ija[i+1]) ? 
i : ija[jj];   // Get the current column index (handle diagonals last)\n\n      if (j >= m) {\n        if (j == ija[jj]) rb_raise(rb_eIndexError, \"ija array for left-hand matrix contains an out-of-bounds column index %u at position %u\", jj, j);\n        else              break;\n      }\n\n      for (IType kk = ijb[j]; kk <= ijb[j+1]; ++kk) { // walk through column pointers for row j of B\n        if (j >= m) continue; // first of all, does B *have* a row j?\n        k = (kk == ijb[j+1]) ? j : ijb[kk];   // Get the current column index (handle diagonals last)\n\n        if (k >= l) {\n          if (k == ijb[kk]) rb_raise(rb_eIndexError, \"ija array for right-hand matrix contains an out-of-bounds column index %u at position %u\", kk, k);\n          else              break;\n        }\n\n        if (mask[k] == )\n      }\n\n    }\n  }\n}\n*/\n\n// Yale: Symbolic matrix multiply c=a*b\ninline size_t symbmm(const unsigned int n, const unsigned int m, const unsigned int l, const IType* ia, const IType* ja, const bool diaga,\n            const IType* ib, const IType* jb, const bool diagb, IType* ic, const bool diagc) {\n  unsigned int max_lmn = std::max(std::max(m,n), l);\n  IType mask[max_lmn];  // INDEX in the SMMP paper.\n  IType j, k; /* Local variables */\n  size_t ndnz = n;\n\n  for (IType idx = 0; idx < max_lmn; ++idx)\n    mask[idx] = std::numeric_limits<IType>::max();\n\n  if (ic) { // Only write to ic if it's supplied; otherwise, we're just counting.\n    if (diagc)  ic[0] = n+1;\n    else        ic[0] = 0;\n  }\n\n  IType minmn = std::min(m,n);\n  IType minlm = std::min(l,m);\n\n  for (IType i = 0; i < n; ++i) { // MAIN LOOP: through rows\n\n    for (IType jj = ia[i]; jj <= ia[i+1]; ++jj) { // merge row lists, walking through columns in each row\n\n      // j <- column index given by JA[jj], or handle diagonal.\n      if (jj == ia[i+1]) { // Don't really do it the last time -- just handle diagonals in a new yale matrix.\n        if (!diaga || i >= minmn) 
continue;\n        j = i;\n      } else j = ja[jj];\n\n      for (IType kk = ib[j]; kk <= ib[j+1]; ++kk) { // Now walk through columns K of row J in matrix B.\n        if (kk == ib[j+1]) {\n          if (!diagb || j >= minlm) continue;\n          k = j;\n        } else k = jb[kk];\n\n        if (mask[k] != i) {\n          mask[k] = i;\n          ++ndnz;\n        }\n      }\n    }\n\n    if (diagc && mask[i] == std::numeric_limits<IType>::max()) --ndnz;\n\n    if (ic) ic[i+1] = ndnz;\n  }\n\n  return ndnz;\n} /* symbmm_ */\n\n\n// In-place quicksort (from Wikipedia) -- called by smmp_sort_columns, below. All functions are inclusive of left, right.\nnamespace smmp_sort {\n  const size_t THRESHOLD = 4;  // switch to insertion sort for 4 elements or fewer\n\n  template <typename DType>\n  void print_array(DType* vals, IType* array, IType left, IType right) {\n    for (IType i = left; i <= right; ++i) {\n      std::cerr << array[i] << \":\" << vals[i] << \"  \";\n    }\n    std::cerr << std::endl;\n  }\n\n  template <typename DType>\n  IType partition(DType* vals, IType* array, IType left, IType right, IType pivot) {\n    IType pivotJ = array[pivot];\n    DType pivotV = vals[pivot];\n\n    // Swap pivot and right\n    array[pivot] = array[right];\n    vals[pivot]  = vals[right];\n    array[right] = pivotJ;\n    vals[right]  = pivotV;\n\n    IType store = left;\n    for (IType idx = left; idx < right; ++idx) {\n      if (array[idx] <= pivotJ) {\n        // Swap i and store\n        std::swap(array[idx], array[store]);\n        std::swap(vals[idx],  vals[store]);\n        ++store;\n      }\n    }\n\n    std::swap(array[store], array[right]);\n    std::swap(vals[store],  vals[right]);\n\n    return store;\n  }\n\n  // Recommended to use the median of left, right, and mid for the pivot.\n  template <typename I>\n  inline I median(I a, I b, I c) {\n    if (a < b) {\n      if (b < c) return b; // a b c\n      if (a < c) return c; // a c b\n                 return a; // c a 
b\n\n    } else { // a > b\n      if (a < c) return a; // b a c\n      if (b < c) return c; // b c a\n                 return b; // c b a\n    }\n  }\n\n\n  // Insertion sort is more efficient than quicksort for small N\n  template <typename DType>\n  void insertion_sort(DType* vals, IType* array, IType left, IType right) {\n    for (IType idx = left; idx <= right; ++idx) {\n      IType col_to_insert = array[idx];\n      DType val_to_insert = vals[idx];\n\n      IType hole_pos = idx;\n      for (; hole_pos > left && col_to_insert < array[hole_pos-1]; --hole_pos) {\n        array[hole_pos] = array[hole_pos - 1];  // shift the larger column index up\n        vals[hole_pos]  = vals[hole_pos - 1];   // value goes along with it\n      }\n\n      array[hole_pos] = col_to_insert;\n      vals[hole_pos]  = val_to_insert;\n    }\n  }\n\n\n  template <typename DType>\n  void quicksort(DType* vals, IType* array, IType left, IType right) {\n\n    if (left < right) {\n      if (right - left < THRESHOLD) {\n        insertion_sort(vals, array, left, right);\n      } else {\n        // choose any pivot such that left < pivot < right\n        IType pivot = median<IType>(left, right, (IType)(((unsigned long)left + (unsigned long)right) / 2));\n        pivot = partition(vals, array, left, right, pivot);\n\n        // recursively sort elements smaller than the pivot\n        quicksort<DType>(vals, array, left, pivot-1);\n\n        // recursively sort elements at least as big as the pivot\n        quicksort<DType>(vals, array, pivot+1, right);\n      }\n    }\n  }\n\n\n}; // end of namespace smmp_sort\n\n\n/*\n * For use following symbmm and numbmm. 
Sorts the matrix entries in each row according to the column index.\n * This utilizes quicksort, which is an in-place unstable sort (since there are no duplicate entries, we don't care\n * about stability).\n *\n * TODO: It might be worthwhile to do a test for free memory, and if available, use an unstable sort that isn't in-place.\n *\n * TODO: It's actually probably possible to write an even faster sort, since symbmm/numbmm are not producing a random\n * ordering. If someone is doing a lot of Yale matrix multiplication, it might benefit them to consider even insertion\n * sort.\n */\ntemplate <typename DType>\ninline void smmp_sort_columns(const size_t n, const IType* ia, IType* ja, DType* a) {\n  for (size_t i = 0; i < n; ++i) {\n    if (ia[i+1] - ia[i] < 2) continue; // no need to sort rows containing only one or two elements.\n    else if (ia[i+1] - ia[i] <= smmp_sort::THRESHOLD) {\n      smmp_sort::insertion_sort<DType>(a, ja, ia[i], ia[i+1]-1); // faster for small rows\n    } else {\n      smmp_sort::quicksort<DType>(a, ja, ia[i], ia[i+1]-1);      // faster for large rows (and may call insertion_sort as well)\n    }\n  }\n}\n\n\n// Copies an upper row-major array from U, zeroing U; U is unit, so diagonal is not copied.\n//\n// From ATLAS 3.8.0.\ntemplate <typename DType>\nstatic inline void trcpzeroU(const int M, const int N, DType* U, const int ldu, DType* C, const int ldc) {\n\n  for (int i = 0; i != M; ++i) {\n    for (int j = i+1; j < N; ++j) {\n      C[j] = U[j];\n      U[j] = 0;\n    }\n\n    C += ldc;\n    U += ldu;\n  }\n}\n\n\n/*\n * Un-comment the following lines when we figure out how to calculate NB for each of the ATLAS-derived\n * functions. 
This is probably really complicated.\n *\n * Also needed: ATL_MulByNB, ATL_DivByNB (both defined in the build process for ATLAS), and ATL_mmMU.\n *\n */\n\n/*\n\ntemplate <bool RowMajor, bool Upper, typename DType>\nstatic int trtri_4(const enum CBLAS_DIAG Diag, DType* A, const int lda) {\n\n  if (RowMajor) {\n    DType *pA0 = A, *pA1 = A+lda, *pA2 = A+2*lda, *pA3 = A+3*lda;\n    DType tmp;\n    if (Upper) {\n      DType A01 = pA0[1], A02 = pA0[2], A03 = pA0[3],\n                          A12 = pA1[2], A13 = pA1[3],\n                                        A23 = pA2[3];\n\n      if (Diag == CblasNonUnit) {\n        pA0->inverse();\n        (pA1+1)->inverse();\n        (pA2+2)->inverse();\n        (pA3+3)->inverse();\n\n        pA0[1] = -A01 * pA1[1] * pA0[0];\n        pA1[2] = -A12 * pA2[2] * pA1[1];\n        pA2[3] = -A23 * pA3[3] * pA2[2];\n\n        pA0[2] = -(A01 * pA1[2] + A02 * pA2[2]) * pA0[0];\n        pA1[3] = -(A12 * pA2[3] + A13 * pA3[3]) * pA1[1];\n\n        pA0[3] = -(A01 * pA1[3] + A02 * pA2[3] + A03 * pA3[3]) * pA0[0];\n\n      } else {\n\n        pA0[1] = -A01;\n        pA1[2] = -A12;\n        pA2[3] = -A23;\n\n        pA0[2] = -(A01 * pA1[2] + A02);\n        pA1[3] = -(A12 * pA2[3] + A13);\n\n        pA0[3] = -(A01 * pA1[3] + A02 * pA2[3] + A03);\n      }\n\n    } else { // Lower\n      DType A10 = pA1[0],\n            A20 = pA2[0], A21 = pA2[1],\n            A30 = PA3[0], A31 = pA3[1], A32 = pA3[2];\n      DType *B10 = pA1,\n            *B20 = pA2,\n            *B30 = pA3,\n            *B21 = pA2+1,\n            *B31 = pA3+1,\n            *B32 = pA3+2;\n\n\n      if (Diag == CblasNonUnit) {\n        pA0->inverse();\n        (pA1+1)->inverse();\n        (pA2+2)->inverse();\n        (pA3+3)->inverse();\n\n        *B10 = -A10 * pA0[0] * pA1[1];\n        *B21 = -A21 * pA1[1] * pA2[2];\n        *B32 = -A32 * pA2[2] * pA3[3];\n        *B20 = -(A20 * pA0[0] + A21 * (*B10)) * pA2[2];\n        *B31 = -(A31 * pA1[1] + A32 * (*B21)) * pA3[3];\n        *B30 = 
-(A30 * pA0[0] + A31 * (*B10) + A32 * (*B20)) * pA3;\n      } else {\n        *B10 = -A10;\n        *B21 = -A21;\n        *B32 = -A32;\n        *B20 = -(A20 + A21 * (*B10));\n        *B31 = -(A31 + A32 * (*B21));\n        *B30 = -(A30 + A31 * (*B10) + A32 * (*B20));\n      }\n    }\n\n  } else {\n    rb_raise(rb_eNotImpError, \"only row-major implemented at this time\");\n  }\n\n  return 0;\n\n}\n\n\ntemplate <bool RowMajor, bool Upper, typename DType>\nstatic int trtri_3(const enum CBLAS_DIAG Diag, DType* A, const int lda) {\n\n  if (RowMajor) {\n\n    DType tmp;\n\n    if (Upper) {\n      DType A01 = pA0[1], A02 = pA0[2], A03 = pA0[3],\n                          A12 = pA1[2], A13 = pA1[3];\n\n      DType *B01 = pA0 + 1,\n            *B02 = pA0 + 2,\n            *B12 = pA1 + 2;\n\n      if (Diag == CblasNonUnit) {\n        pA0->inverse();\n        (pA1+1)->inverse();\n        (pA2+2)->inverse();\n\n        *B01 = -A01 * pA1[1] * pA0[0];\n        *B12 = -A12 * pA2[2] * pA1[1];\n        *B02 = -(A01 * (*B12) + A02 * pA2[2]) * pA0[0];\n      } else {\n        *B01 = -A01;\n        *B12 = -A12;\n        *B02 = -(A01 * (*B12) + A02);\n      }\n\n    } else { // Lower\n      DType *pA0=A, *pA1=A+lda, *pA2=A+2*lda;\n      DType A10=pA1[0],\n            A20=pA2[0], A21=pA2[1];\n\n      DType *B10 = pA1,\n            *B20 = pA2;\n            *B21 = pA2+1;\n\n      if (Diag == CblasNonUnit) {\n        pA0->inverse();\n        (pA1+1)->inverse();\n        (pA2+2)->inverse();\n        *B10 = -A10 * pA0[0] * pA1[1];\n        *B21 = -A21 * pA1[1] * pA2[2];\n        *B20 = -(A20 * pA0[0] + A21 * (*B10)) * pA2[2];\n      } else {\n        *B10 = -A10;\n        *B21 = -A21;\n        *B20 = -(A20 + A21 * (*B10));\n      }\n    }\n\n\n  } else {\n    rb_raise(rb_eNotImpError, \"only row-major implemented at this time\");\n  }\n\n  return 0;\n\n}\n\ntemplate <bool RowMajor, bool Upper, bool Real, typename DType>\nstatic void trtri(const enum CBLAS_DIAG Diag, const int N, DType* A, 
const int lda) {\n  DType *Age, *Atr;\n  DType tmp;\n  int Nleft, Nright;\n\n  int ierr = 0;\n\n  static const DType ONE = 1;\n  static const DType MONE -1;\n  static const DType NONE = -1;\n\n  if (RowMajor) {\n\n    // FIXME: Use REAL_RECURSE_LIMIT here for float32 and float64 (instead of 1)\n    if ((Real && N > REAL_RECURSE_LIMIT) || (N > 1)) {\n      Nleft = N >> 1;\n#ifdef NB\n      if (Nleft > NB) NLeft = ATL_MulByNB(ATL_DivByNB(Nleft));\n#endif\n\n      Nright = N - Nleft;\n\n      if (Upper) {\n        Age = A + Nleft;\n        Atr = A + (Nleft * (lda+1));\n\n        nm::math::trsm<DType>(CblasRowMajor, CblasRight, CblasUpper, CblasNoTrans, Diag,\n                              Nleft, Nright, ONE, Atr, lda, Age, lda);\n\n        nm::math::trsm<DType>(CblasRowMajor, CblasLeft, CblasUpper, CblasNoTrans, Diag,\n                              Nleft, Nright, MONE, A, lda, Age, lda);\n\n      } else { // Lower\n        Age = A + ((Nleft*lda));\n        Atr = A + (Nleft * (lda+1));\n\n        nm::math::trsm<DType>(CblasRowMajor, CblasRight, CblasLower, CblasNoTrans, Diag,\n                              Nright, Nleft, ONE, A, lda, Age, lda);\n        nm::math::trsm<DType>(CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, Diag,\n                              Nright, Nleft, MONE, Atr, lda, Age, lda);\n      }\n\n      ierr = trtri<RowMajor,Upper,Real,DType>(Diag, Nleft, A, lda);\n      if (ierr) return ierr;\n\n      ierr = trtri<RowMajor,Upper,Real,DType>(Diag, Nright, Atr, lda);\n      if (ierr) return ierr + Nleft;\n\n    } else {\n      if (Real) {\n        if (N == 4) {\n          return trtri_4<RowMajor,Upper,Real,DType>(Diag, A, lda);\n        } else if (N == 3) {\n          return trtri_3<RowMajor,Upper,Real,DType>(Diag, A, lda);\n        } else if (N == 2) {\n          if (Diag == CblasNonUnit) {\n            A->inverse();\n            (A+(lda+1))->inverse();\n\n            if (Upper) {\n              *(A+1)     *=   *A;         // TRI_MUL\n              
*(A+1)     *=   *(A+lda+1); // TRI_MUL\n            } else {\n              *(A+lda)   *=   *A;         // TRI_MUL\n              *(A+lda)   *=   *(A+lda+1); // TRI_MUL\n            }\n          }\n\n          if (Upper) *(A+1)   = -*(A+1);      // TRI_NEG\n          else       *(A+lda) = -*(A+lda);    // TRI_NEG\n        } else if (Diag == CblasNonUnit) A->inverse();\n      } else { // not real\n        if (Diag == CblasNonUnit) A->inverse();\n      }\n    }\n\n  } else {\n    rb_raise(rb_eNotImpError, \"only row-major implemented at this time\");\n  }\n\n  return ierr;\n}\n\n\ntemplate <bool RowMajor, bool Real, typename DType>\nint getri(const int N, DType* A, const int lda, const int* ipiv, DType* wrk, const int lwrk) {\n\n  if (!RowMajor) rb_raise(rb_eNotImpError, \"only row-major implemented at this time\");\n\n  int jb, nb, I, ndown, iret;\n\n  const DType ONE = 1, NONE = -1;\n\n  int iret = trtri<RowMajor,false,Real,DType>(CblasNonUnit, N, A, lda);\n  if (!iret && N > 1) {\n    jb = lwrk / N;\n    if (jb >= NB) nb = ATL_MulByNB(ATL_DivByNB(jb));\n    else if (jb >= ATL_mmMU) nb = (jb/ATL_mmMU)*ATL_mmMU;\n    else nb = jb;\n    if (!nb) return -6; // need at least 1 row of workspace\n\n    // only first iteration will have partial block, unroll it\n\n    jb = N - (N/nb) * nb;\n    if (!jb) jb = nb;\n    I = N - jb;\n    A += lda * I;\n    trcpzeroU<DType>(jb, jb, A+I, lda, wrk, jb);\n    nm::math::trsm<DType>(CblasRowMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasUnit,\n                          jb, N, ONE, wrk, jb, A, lda);\n\n    if (I) {\n      do {\n        I -= nb;\n        A -= nb * lda;\n        ndown = N-I;\n        trcpzeroU<DType>(nb, ndown, A+I, lda, wrk, ndown);\n        nm::math::gemm<DType>(CblasRowMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasUnit,\n                              nb, N, ONE, wrk, ndown, A, lda);\n      } while (I);\n    }\n\n    // Apply row interchanges\n\n    for (I = N - 2; I >= 0; --I) {\n      jb = ipiv[I];\n      if 
(jb != I) nm::math::swap<DType>(N, A+I*lda, 1, A+jb*lda, 1);\n    }\n  }\n\n  return iret;\n}\n*/\n\n/*\n * Macro for declaring LAPACK specializations of the getrf function.\n *\n * type is the DType; call is the specific function to call; cast_as is what the DType* should be\n * cast to in order to pass it to LAPACK.\n */\n#define LAPACK_GETRF(type, call, cast_as)                                     \\\ntemplate <>                                                                   \\\ninline int getrf(const enum CBLAS_ORDER Order, const int M, const int N, type * A, const int lda, int* ipiv) { \\\n  int info = call(Order, M, N, reinterpret_cast<cast_as *>(A), lda, ipiv);    \\\n  if (!info) return info;                                                     \\\n  else {                                                                      \\\n    rb_raise(rb_eArgError, \"getrf: problem with argument %d\\n\", info);        \\\n    return info;                                                              \\\n  }                                                                           \\\n}\n\n/* Specialize for ATLAS types */\n/*LAPACK_GETRF(float,      clapack_sgetrf, float)\nLAPACK_GETRF(double,     clapack_dgetrf, double)\nLAPACK_GETRF(Complex64,  clapack_cgetrf, void)\nLAPACK_GETRF(Complex128, clapack_zgetrf, void)\n*/\n\n}} // end namespace nm::math\n\n\n#endif // MATH_H\n"
  },
  {
    "path": "ext/nmatrix/math/nrm2.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nrm2.h\n//\n// CBLAS nrm2 function\n//\n\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef NRM2_H\n# define NRM2_H\n\n#include \"math/long_dtype.h\"\n\n\nnamespace nm { namespace math {\n\n/*\n * Level 1 BLAS routine which returns the 2-norm of an n-vector x.\n #\n * Based on input types, these are the valid return types:\n *    int -> int\n *    float -> float or double\n *    double -> double\n *    complex64 -> float or double\n *    complex128 -> double\n */\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\nMDType nrm2(const int N, const DType* X, const int incX) {\n  const DType ONE = 1, ZERO = 0;\n  typename LongDType<DType>::type scale = 0, ssq = 1, absxi, temp;\n\n\n  if ((N < 1) || (incX < 1))    return ZERO;\n  else if (N == 1)              return std::abs(X[0]);\n\n  for (int i = 0; i < N; ++i) {\n    absxi = std::abs(X[i*incX]);\n    if (scale < absxi) {\n      temp  = scale / absxi;\n      scale = absxi;\n      ssq   = ONE + ssq * (temp * temp);\n    }\n    else if(scale != 0) {\n      temp = absxi / scale;\n      ssq += temp * temp;\n    }\n  }\n\n  return (MDType)(scale * std::sqrt( ssq ));\n}\n\n\ntemplate <typename FloatDType>\nstatic inline void nrm2_complex_helper(const FloatDType& xr, const FloatDType& xi, double& scale, double& ssq) {\n  double absx = std::abs(xr);\n  if (scale < absx) {\n    double temp  = scale / absx;\n    scale = absx;\n    ssq   = 1.0 + ssq * (temp * temp);\n  }\n  else if(scale != 0)  {\n    double temp = absx / scale;\n    
ssq += temp * temp;\n  }\n\n  absx = std::abs(xi);\n  if (scale < absx) {\n    double temp  = scale / absx;\n    scale = absx;\n    ssq   = 1.0 + ssq * (temp * temp);\n  }\n  else if(scale != 0)  {\n    double temp = absx / scale;\n    ssq += temp * temp;\n  }\n}\n\ntemplate <>\nfloat nrm2(const int N, const Complex64* X, const int incX) {\n  double scale = 0, ssq = 1;\n\n  if ((N < 1) || (incX < 1))    return 0.0;\n\n  for (int i = 0; i < N; ++i) {\n    nrm2_complex_helper<float>(X[i*incX].r, X[i*incX].i, scale, ssq);\n  }\n\n  return scale * std::sqrt( ssq );\n}\n\n// FIXME: Function above is duplicated here, should be writeable as a template using\n// FIXME: xMagnitudeDType.\ntemplate <>\ndouble nrm2(const int N, const Complex128* X, const int incX) {\n  double scale = 0, ssq = 1;\n\n  if ((N < 1) || (incX < 1))    return 0.0;\n\n  for (int i = 0; i < N; ++i) {\n    nrm2_complex_helper<double>(X[i*incX].r, X[i*incX].i, scale, ssq);\n  }\n\n  return scale * std::sqrt( ssq );\n}\n\ntemplate <typename DType, typename MDType = typename MagnitudeDType<DType>::type>\ninline void cblas_nrm2(const int N, const void* X, const int incX, void* result) {\n  *reinterpret_cast<MDType*>( result ) = nrm2<DType, MDType>( N, reinterpret_cast<const DType*>(X), incX );\n}\n\n\n\n}} // end of namespace nm::math\n\n#endif // NRM2_H\n"
  },
  {
    "path": "ext/nmatrix/math/rot.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == rot.h\n//\n// BLAS rot function in native C++.\n//\n\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef ROT_H\n# define ROT_H\n\nnamespace nm { namespace math {\n\n\n// TODO: Test this to see if it works properly on complex. ATLAS has a separate algorithm for complex, which looks like\n// TODO: it may actually be the same one.\n//\n// This function is called ATL_rot in ATLAS 3.8.4.\ntemplate <typename DType>\ninline void rot_helper(const int N, DType* X, const int incX, DType* Y, const int incY, const DType c, const DType s) {\n  if (c != 1 || s != 0) {\n    if (incX == 1 && incY == 1) {\n      for (int i = 0; i != N; ++i) {\n        DType tmp = X[i] * c + Y[i] * s;\n        Y[i]      = Y[i] * c - X[i] * s;\n        X[i]      = tmp;\n      }\n    } else {\n      for (int i = N; i > 0; --i, Y += incY, X += incX) {\n        DType tmp = *X * c + *Y * s;\n        *Y  = *Y * c - *X * s;\n        *X  = tmp;\n      }\n    }\n  }\n}\n\n\n/* Applies a plane rotation. From ATLAS 3.8.4. 
*/\ntemplate <typename DType, typename CSDType>\ninline void rot(const int N, DType* X, const int incX, DType* Y, const int incY, const CSDType c, const CSDType s) {\n  int incx = incX, incy = incY;\n  DType *x = X, *y = Y;\n\n  if (N > 0) {\n    if (incX < 0) {\n      if (incY < 0) { incx = -incx; incy = -incy; }\n      else x += -incX * (N-1);\n    } else if (incY < 0) {\n      incy = -incy;\n      incx = -incx;\n      x += (N-1) * incX;\n    }\n    rot_helper<DType>(N, x, incx, y, incy, c, s);\n  }\n}\n\ntemplate <typename DType, typename CSDType>\ninline void cblas_rot(const int N, void* X, const int incX, void* Y, const int incY, const void* c, const void* s) {\n  rot<DType,CSDType>(N, reinterpret_cast<DType*>(X), incX, reinterpret_cast<DType*>(Y), incY,\n                       *reinterpret_cast<const CSDType*>(c), *reinterpret_cast<const CSDType*>(s));\n}\n\n\n} } //nm::math\n\n#endif // ROT_H\n"
  },
  {
    "path": "ext/nmatrix/math/rotg.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == rotg.h\n//\n// BLAS rotg function in native C++.\n//\n\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef ROTG_H\n# define ROTG_H\n\nnamespace nm { namespace math {\n\n/* Givens plane rotation. From ATLAS 3.8.4. */\n// FIXME: Not working properly for Ruby objects.\ntemplate <typename DType>\ninline void rotg(DType* a, DType* b, DType* c, DType* s) {\n  DType aa    = std::abs(*a), ab = std::abs(*b);\n  DType roe   = aa > ab ? *a : *b;\n  DType scal  = aa + ab;\n\n  if (scal == 0) {\n    *c =  1;\n    *s = *a = *b = 0;\n  } else {\n    DType t0  = aa / scal, t1 = ab / scal;\n    DType r   = scal * std::sqrt(t0 * t0 + t1 * t1);\n    if (roe < 0) r = -r;\n    *c = *a / r;\n    *s = *b / r;\n    DType z   = (*c != 0) ? (1 / *c) : DType(1);\n    *a = r;\n    *b = z;\n  }\n}\n\ntemplate <>\ninline void rotg(Complex64* a, Complex64* b, Complex64* c, Complex64* s) {\n  rb_raise(rb_eNotImpError, \"BLAS not available, and existing template requires modification for complex\");\n}\n\ntemplate <>\ninline void rotg(Complex128* a, Complex128* b, Complex128* c, Complex128* s) {\n  rb_raise(rb_eNotImpError, \"BLAS not available, and existing template requires modification for complex\");\n}\n\n\ntemplate <typename DType>\ninline void cblas_rotg(void* a, void* b, void* c, void* s) {\n  rotg<DType>(reinterpret_cast<DType*>(a), reinterpret_cast<DType*>(b), reinterpret_cast<DType*>(c), reinterpret_cast<DType*>(s));\n}\n\n\n} } //nm::math\n\n#endif // ROTG_H\n"
  },
  {
    "path": "ext/nmatrix/math/scal.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == scal.h\n//\n// BLAS scal function.\n//\n\n#ifndef SCAL_H\n#define SCAL_H\n\nnamespace nm { namespace math {\n\n/*  Purpose */\n/*  ======= */\n\n/*     DSCAL scales a vector by a constant. */\n/*     uses unrolled loops for increment equal to one. */\n\n/*  Further Details */\n/*  =============== */\n\n/*     jack dongarra, linpack, 3/11/78. */\n/*     modified 3/93 to return if incx .le. 0. */\n/*     modified 12/3/93, array(1) declarations changed to array(*) */\n\n/*  ===================================================================== */\n\ntemplate <typename DType>\ninline void scal(const int n, const DType scalar, DType* x, const int incx) {\n\n  if (n <= 0 || incx <= 0) {\n    return;\n  }\n\n  for (int i = 0; incx < 0 ? i > n*incx : i < n*incx; i += incx) {\n    x[i] = scalar * x[i];\n  }\n}\n\n/*\n * Function signature conversion for LAPACK's scal function.\n */\ntemplate <typename DType>\ninline void cblas_scal(const int n, const void* scalar, void* x, const int incx) {\n  scal<DType>(n, *reinterpret_cast<const DType*>(scalar), reinterpret_cast<DType*>(x), incx);\n}\n\n}} // end of nm::math\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/math/trsm.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == trsm.h\n//\n// trsm function in native C++.\n//\n/*\n *             Automatically Tuned Linear Algebra Software v3.8.4\n *                    (C) Copyright 1999 R. Clint Whaley\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *   1. Redistributions of source code must retain the above copyright\n *      notice, this list of conditions and the following disclaimer.\n *   2. Redistributions in binary form must reproduce the above copyright\n *      notice, this list of conditions, and the following disclaimer in the\n *      documentation and/or other materials provided with the distribution.\n *   3. The name of the ATLAS group or the names of its contributers may\n *      not be used to endorse or promote products derived from this\n *      software without specific written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS\n * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef TRSM_H\n#define TRSM_H\n\n\nnamespace nm { namespace math {\n\n\n/*\n * This version of trsm doesn't do any error checks and only works on column-major matrices.\n *\n * For row major, call trsm<DType> instead. That will handle necessary changes-of-variables\n * and parameter checks.\n *\n * Note that some of the boundary conditions here may be incorrect. Very little has been tested!\n * This was converted directly from dtrsm.f using f2c, and then rewritten more cleanly.\n */\ntemplate <typename DType>\ninline void trsm_nothrow(const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                         const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                         const int m, const int n, const DType alpha, const DType* a,\n                         const int lda, DType* b, const int ldb)\n{\n\n  // (row-major) trsm: left upper trans nonunit m=3 n=1 1/1 a 3 b 3\n\n  if (m == 0 || n == 0) return; /* Quick return if possible. */\n  \n  // Apply necessary offset\n  a -= 1 + lda;\n  b -= 1 + ldb;\n\n  if (alpha == 0) { // Handle alpha == 0\n    for (int j = 1; j <= n; ++j) {\n      for (int i = 1; i <= m; ++i) {\n        b[i + j * ldb] = 0;\n      }\n    }\n    return;\n  }\n\n  if (side == CblasLeft) {\n    if (trans_a == CblasNoTrans) {\n\n      /* Form  B := alpha*inv( A )*B. 
*/\n      if (uplo == CblasUpper) {\n        for (int j = 1; j <= n; ++j) {\n          if (alpha != 1) {\n            for (int i = 1; i <= m; ++i) {\n              b[i + j * ldb] = alpha * b[i + j * ldb];\n            }\n          }\n          for (int k = m; k >= 1; --k) {\n            if (b[k + j * ldb] != 0) {\n              if (diag == CblasNonUnit) {\n                b[k + j * ldb] /= a[k + k * lda];\n              }\n\n              for (int i = 1; i <= k-1; ++i) {\n                b[i + j * ldb] -= b[k + j * ldb] * a[i + k * lda];\n              }\n            }\n          }\n        }\n      } else {\n        for (int j = 1; j <= n; ++j) {\n          if (alpha != 1) {\n            for (int i = 1; i <= m; ++i) {\n              b[i + j * ldb] = alpha * b[i + j * ldb];\n            }\n          }\n          for (int k = 1; k <= m; ++k) {\n            if (b[k + j * ldb] != 0.) {\n              if (diag == CblasNonUnit) {\n                b[k + j * ldb] /= a[k + k * lda];\n              }\n              for (int i = k+1; i <= m; ++i) {\n                b[i + j * ldb] -= b[k + j * ldb] * a[i + k * lda];\n              }\n            }\n          }\n        }\n      }\n    } else { // CblasTrans\n\n      /*           Form  B := alpha*inv( A**T )*B. */\n      if (uplo == CblasUpper) {\n        for (int j = 1; j <= n; ++j) {\n          for (int i = 1; i <= m; ++i) {\n            DType temp = alpha * b[i + j * ldb];\n            for (int k = 1; k <= i-1; ++k) { // limit was i-1. 
Lots of similar bugs in this code, probably.\n              temp -= a[k + i * lda] * b[k + j * ldb];\n            }\n            if (diag == CblasNonUnit) {\n              temp /= a[i + i * lda];\n            }\n            b[i + j * ldb] = temp;\n          }\n        }\n      } else {\n        for (int j = 1; j <= n; ++j) {\n          for (int i = m; i >= 1; --i) {\n            DType temp= alpha * b[i + j * ldb];\n            for (int k = i+1; k <= m; ++k) {\n              temp -= a[k + i * lda] * b[k + j * ldb];\n            }\n            if (diag == CblasNonUnit) {\n              temp /= a[i + i * lda];\n            }\n            b[i + j * ldb] = temp;\n          }\n        }\n      }\n    }\n  } else { // right side\n\n    if (trans_a == CblasNoTrans) {\n\n      /*           Form  B := alpha*B*inv( A ). */\n\n      if (uplo == CblasUpper) {\n        for (int j = 1; j <= n; ++j) {\n          if (alpha != 1) {\n            for (int i = 1; i <= m; ++i) {\n              b[i + j * ldb] = alpha * b[i + j * ldb];\n            }\n          }\n          for (int k = 1; k <= j-1; ++k) {\n            if (a[k + j * lda] != 0) {\n              for (int i = 1; i <= m; ++i) {\n                b[i + j * ldb] -= a[k + j * lda] * b[i + k * ldb];\n              }\n            }\n          }\n          if (diag == CblasNonUnit) {\n            DType temp = 1 / a[j + j * lda];\n            for (int i = 1; i <= m; ++i) {\n              b[i + j * ldb] = temp * b[i + j * ldb];\n            }\n          }\n        }\n      } else {\n        for (int j = n; j >= 1; --j) {\n          if (alpha != 1) {\n            for (int i = 1; i <= m; ++i) {\n              b[i + j * ldb] = alpha * b[i + j * ldb];\n            }\n          }\n\n          for (int k = j+1; k <= n; ++k) {\n            if (a[k + j * lda] != 0.) 
{\n              for (int i = 1; i <= m; ++i) {\n                b[i + j * ldb] -= a[k + j * lda] * b[i + k * ldb];\n              }\n            }\n          }\n          if (diag == CblasNonUnit) {\n            DType temp = 1 / a[j + j * lda];\n\n            for (int i = 1; i <= m; ++i) {\n              b[i + j * ldb] = temp * b[i + j * ldb];\n            }\n          }\n        }\n      }\n    } else { // CblasTrans\n\n      /*           Form  B := alpha*B*inv( A**T ). */\n\n      if (uplo == CblasUpper) {\n        for (int k = n; k >= 1; --k) {\n          if (diag == CblasNonUnit) {\n            DType temp= 1 / a[k + k * lda];\n            for (int i = 1; i <= m; ++i) {\n              b[i + k * ldb] = temp * b[i + k * ldb];\n            }\n          }\n          for (int j = 1; j <= k-1; ++j) {\n            if (a[j + k * lda] != 0.) {\n              DType temp= a[j + k * lda];\n              for (int i = 1; i <= m; ++i) {\n                b[i + j * ldb] -= temp * b[i + k *  ldb];\n              }\n            }\n          }\n          if (alpha != 1) {\n            for (int i = 1; i <= m; ++i) {\n              b[i + k * ldb] = alpha * b[i + k * ldb];\n            }\n          }\n        }\n      } else {\n        for (int k = 1; k <= n; ++k) {\n          if (diag == CblasNonUnit) {\n            DType temp = 1 / a[k + k * lda];\n            for (int i = 1; i <= m; ++i) {\n              b[i + k * ldb] = temp * b[i + k * ldb];\n            }\n          }\n          for (int j = k+1; j <= n; ++j) {\n            if (a[j + k * lda] != 0.) 
{\n              DType temp = a[j + k * lda];\n              for (int i = 1; i <= m; ++i) {\n                b[i + j * ldb] -= temp * b[i + k * ldb];\n              }\n            }\n          }\n          if (alpha != 1) {\n            for (int i = 1; i <= m; ++i) {\n              b[i + k * ldb] = alpha * b[i + k * ldb];\n            }\n          }\n        }\n      }\n    }\n  }\n}\n\n/*\n * BLAS' DTRSM function, generalized.\n */\ntemplate <typename DType, typename = typename std::enable_if<!std::is_integral<DType>::value>::type>\ninline void trsm(const enum CBLAS_ORDER order,\n                 const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                 const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                 const int m, const int n, const DType alpha, const DType* a,\n                 const int lda, DType* b, const int ldb)\n{\n  /*using std::cerr;\n  using std::endl;*/\n\n  int                     num_rows_a = n;\n  if (side == CblasLeft)  num_rows_a = m;\n\n  if (lda < std::max(1,num_rows_a)) {\n    fprintf(stderr, \"TRSM: num_rows_a = %d; got lda=%d\\n\", num_rows_a, lda);\n    rb_raise(rb_eArgError, \"TRSM: Expected lda >= max(1, num_rows_a)\");\n  }\n\n  // Test the input parameters.\n  if (order == CblasRowMajor) {\n    if (ldb < std::max(1,n)) {\n      fprintf(stderr, \"TRSM: M=%d; got ldb=%d\\n\", m, ldb);\n      rb_raise(rb_eArgError, \"TRSM: Expected ldb >= max(1,N)\");\n    }\n\n    // For row major, need to switch side and uplo\n    enum CBLAS_SIDE side_ = side == CblasLeft  ? CblasRight : CblasLeft;\n    enum CBLAS_UPLO uplo_ = uplo == CblasUpper ? CblasLower : CblasUpper;\n\n/*\n    cerr << \"(row-major) trsm: \" << (side_ == CblasLeft ? \"left \" : \"right \")\n         << (uplo_ == CblasUpper ? \"upper \" : \"lower \")\n         << (trans_a == CblasTrans ? \"trans \" : \"notrans \")\n         << (diag == CblasNonUnit ? 
\"nonunit \" : \"unit \")\n         << n << \" \" << m << \" \" << alpha << \" a \" << lda << \" b \" << ldb << endl;\n*/\n    trsm_nothrow<DType>(side_, uplo_, trans_a, diag, n, m, alpha, a, lda, b, ldb);\n\n  } else { // CblasColMajor\n\n    if (ldb < std::max(1,m)) {\n      fprintf(stderr, \"TRSM: M=%d; got ldb=%d\\n\", m, ldb);\n      rb_raise(rb_eArgError, \"TRSM: Expected ldb >= max(1,M)\");\n    }\n/*\n    cerr << \"(col-major) trsm: \" << (side == CblasLeft ? \"left \" : \"right \")\n         << (uplo == CblasUpper ? \"upper \" : \"lower \")\n         << (trans_a == CblasTrans ? \"trans \" : \"notrans \")\n         << (diag == CblasNonUnit ? \"nonunit \" : \"unit \")\n         << m << \" \" << n << \" \" << alpha << \" a \" << lda << \" b \" << ldb << endl;\n*/\n    trsm_nothrow<DType>(side, uplo, trans_a, diag, m, n, alpha, a, lda, b, ldb);\n\n  }\n\n}\n\n} }  // namespace nm::math\n#endif // TRSM_H\n"
  },
  {
    "path": "ext/nmatrix/math/util.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == util.h\n//\n// Collect a few utility functions which convert ruby symbols into arguments\n// that CBLAS or LAPACK can understand: either enum's for CBLAS or char's\n// for LAPACK.\n//\n\n#ifndef UTIL_H\n#define UTIL_H\n\n/* Interprets cblas argument which could be any of false/:no_transpose, :transpose, or :complex_conjugate,\n * into an enum recognized by cblas.\n *\n * Called by nm_cblas_gemm -- basically inline.\n *\n */\nstatic inline enum CBLAS_TRANSPOSE blas_transpose_sym(VALUE op) {\n  if (op == Qfalse || rb_to_id(op) == nm_rb_no_transpose) return CblasNoTrans;\n  else if (rb_to_id(op) == nm_rb_transpose) return CblasTrans;\n  else if (rb_to_id(op) == nm_rb_complex_conjugate) return CblasConjTrans;\n  else rb_raise(rb_eArgError, \"Expected false, :transpose, or :complex_conjugate\");\n  return CblasNoTrans;\n}\n\n/* Interprets transpose argument which could be any of false/:no_transpose, :transpose, or :complex_conjugate,\n * into an character recognized by LAPACKE. 
LAPACKE uses a different system than CBLAS for this.\n *\n */\nstatic inline char lapacke_transpose_sym(VALUE op) {\n  if (op == Qfalse || rb_to_id(op) == nm_rb_no_transpose) return 'N';\n  else if (rb_to_id(op) == nm_rb_transpose) return 'T';\n  else if (rb_to_id(op) == nm_rb_complex_conjugate) return 'C';\n  else rb_raise(rb_eArgError, \"Expected false, :transpose, or :complex_conjugate\");\n  return 'N';\n}\n\n/*\n * Interprets cblas argument which could be :left or :right\n *\n * Called by nm_cblas_trsm -- basically inline\n */\nstatic inline enum CBLAS_SIDE blas_side_sym(VALUE op) {\n  ID op_id = rb_to_id(op);\n  if (op_id == nm_rb_left)  return CblasLeft;\n  if (op_id == nm_rb_right) return CblasRight;\n  rb_raise(rb_eArgError, \"Expected :left or :right for side argument\");\n  return CblasLeft;\n}\n\n/*\n * Interprets the LAPACK side argument which could be :left or :right\n * \n * Related to obtaining Q in QR factorization after calling lapack_geqrf\n */\n\nstatic inline char lapacke_side_sym(VALUE op) {\n  ID op_id = rb_to_id(op);\n  if (op_id == nm_rb_left)  return 'L';\n  if (op_id == nm_rb_right) return 'R';\n  else rb_raise(rb_eArgError, \"Expected :left or :right for side argument\");\n  return 'L';\n}\n\n/*\n * Interprets cblas argument which could be :upper or :lower\n *\n * Called by nm_cblas_trsm -- basically inline\n */\nstatic inline enum CBLAS_UPLO blas_uplo_sym(VALUE op) {\n  ID op_id = rb_to_id(op);\n  if (op_id == nm_rb_upper) return CblasUpper;\n  if (op_id == nm_rb_lower) return CblasLower;\n  rb_raise(rb_eArgError, \"Expected :upper or :lower for uplo argument\");\n  return CblasUpper;\n}\n\n/*\n * Interprets argument which could be :upper or :lower for LAPACKE\n *\n * Called by nm_cblas_trsm -- basically inline\n */\nstatic inline char lapacke_uplo_sym(VALUE op) {\n  ID op_id = rb_to_id(op);\n  if (op_id == nm_rb_upper) return 'U';\n  if (op_id == nm_rb_lower) return 'L';\n  rb_raise(rb_eArgError, \"Expected :upper or :lower for uplo 
argument\");\n  return 'U';\n}\n\n/*\n * Interprets cblas argument which could be :unit (true) or :nonunit (false or anything other than true/:unit)\n *\n * Called by nm_cblas_trsm -- basically inline\n */\nstatic inline enum CBLAS_DIAG blas_diag_sym(VALUE op) {\n  if (rb_to_id(op) == nm_rb_unit || op == Qtrue) return CblasUnit;\n  return CblasNonUnit;\n}\n\n/*\n * Interprets cblas argument which could be :row or :col\n * \n * This function, unlike the other ones, works for LAPACKE as well as for CBLAS/CLAPACK.\n * Although LAPACKE calls this an int instead of a enum, the magic values are the same\n * (101 for row-major, 102 for column-major).\n */\nstatic inline enum CBLAS_ORDER blas_order_sym(VALUE op) {\n  if (rb_to_id(op) == rb_intern(\"row\") || rb_to_id(op) == rb_intern(\"row_major\")) return CblasRowMajor;\n  else if (rb_to_id(op) == rb_intern(\"col\") || rb_to_id(op) == rb_intern(\"col_major\") ||\n           rb_to_id(op) == rb_intern(\"column\") || rb_to_id(op) == rb_intern(\"column_major\")) return CblasColMajor;\n  rb_raise(rb_eArgError, \"Expected :row or :col for order argument\");\n  return CblasRowMajor;\n}\n\n/*\n * Interprets lapack jobu and jobvt arguments, for which LAPACK needs character values A, S, O, or N.\n *\n * Called by lapack_gesvd -- basically inline. 
svd stands for singular value decomposition.\n */\nstatic inline char lapack_svd_job_sym(VALUE op) {\n  if (rb_to_id(op) == rb_intern(\"all\") || rb_to_id(op) == rb_intern(\"a\")) return 'A';\n  else if (rb_to_id(op) == rb_intern(\"return\") || rb_to_id(op) == rb_intern(\"s\")) return 'S';\n  else if (rb_to_id(op) == rb_intern(\"overwrite\") || rb_to_id(op) == rb_intern(\"o\")) return 'O';\n  else if (rb_to_id(op) == rb_intern(\"none\") || rb_to_id(op) == rb_intern(\"n\")) return 'N';\n  else rb_raise(rb_eArgError, \"Expected :all, :return, :overwrite, :none (or :a, :s, :o, :n, respectively)\");\n  return 'a';\n}\n\n/*\n * Interprets lapack jobvl and jobvr arguments, for which LAPACK needs character values N or V.\n *\n * Called by lapack_geev -- basically inline. evd stands for eigenvalue decomposition.\n */\nstatic inline char lapack_evd_job_sym(VALUE op) {\n  if (op == Qfalse || op == Qnil || rb_to_id(op) == rb_intern(\"n\")) return 'N';\n  else return 'V';\n}\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/math.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - present, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == math.cpp\n//\n// Ruby-exposed CBLAS and LAPACK functions that are available without\n// an external library.\n//\n// === Procedure for adding CBLAS functions to math.cpp/math.h:\n//\n// This procedure is written as if for a fictional function with double\n// version dbacon, which we'll say is from CBLAS.\n//\n// 1. Write a default templated version which probably returns a boolean.\n//    Call it bacon, and put it in math.h.\n//\n//    template <typename DType>\n//    bool bacon(const CBLAS_TRANSPOSE trans, const int M, const int N, DType* A, ...) {\n//      rb_raise(rb_eNotImpError, \"only implemented for ATLAS types (float32, float64, complex64, complex128)\");\n//    }\n//\n//    Make sure this is in namespace nm::math\n//\n// 2. In math.cpp, add a templated inline static version of the function which takes\n//    only void* pointers and uses static_cast to convert them to the\n//    proper dtype. This should also be in namespace nm::math\n//\n//    This function may also need to switch m and n if these arguments are given.\n//\n//    For an example, see cblas_gemm. This function should do nothing other than cast\n//    appropriately. 
If cblas_dbacon, cblas_sbacon, cblas_cbacon, and cblas_zbacon\n//    all take void* only, and no other pointers that vary between functions, you can skip\n//    this particular step -- as we can call them directly using a custom function pointer\n//    array (same function signature!).\n//\n//    This version of the function will be the one exposed through NMatrix::BLAS. We\n//    want it to be as close to the actual BLAS version of the function as possible,\n//    and with as few checks as possible.\n//\n//    You will probably need a forward declaration in the extern \"C\" block.\n//\n//    Note: In that case, the function you wrote in Step 1 should also take exactly the\n//    same arguments as cblas_xbacon. Otherwise Bad Things will happen.\n//\n// 3. In cblas_templates_core.h, add a default template like in step 1 (which will just\n//    call nm::math::bacon()) and also\n//    inline specialized versions of bacon for the different BLAS types.\n//    This will allow both nmatrix-atlas and nmatrix-lapacke to use the optimized version\n//    of bacon from whatever external library is available, as well as the internal version\n//    if an external version is not available. These functions will end up in a namespace\n//    like nm::math::atlas, but don't explicitly put them in a namespace, they will get\n//    put in the appropriate namespace when cblas_templates_core.h is included.\n//\n//    template <typename DType>\n//    inline bool bacon(const CBLAS_TRANSPOSE trans, const int M, const int N, DType* A, ...) {\n//      nm::math::bacon(trans, M, N, A, ...);\n//    }\n//\n//    template <>\n//    inline bool bacon(const CBLAS_TRANSPOSE trans, const int M, const int N, float* A, ...) {\n//      cblas_sbacon(trans, M, N, A, ...);\n//      return true;\n//    }\n//\n//    Note that you should do everything in your power here to parse any return values\n//    cblas_sbacon may give you. 
We're not trying very hard in this example, but you might\n//    look at getrf to see how it might be done.\n//\n// 4. Write the C function nm_cblas_bacon, which is what Ruby will call. Use the example\n//    of nm_cblas_gemm below. Also you must add a similar function in math_atlas.cpp\n//    and math_lapacke.cpp\n//\n// 5. Expose the function in nm_math_init_blas(), in math.cpp:\n//\n//    rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_bacon\", (METHOD)nm_cblas_bacon, 5);\n//\n//    Do something similar in math_atlas.cpp and math_lapacke.cpp to add the function\n//    to the plugin gems.\n//\n//    Here, we're telling Ruby that nm_cblas_bacon takes five arguments as a Ruby function.\n//\n// 6. In blas.rb, write a bacon function which accesses cblas_bacon, but does all the\n//    sanity checks we left out in step 2.\n//\n// 7. Write tests for NMatrix::BLAS::bacon, confirming that it works for the ATLAS dtypes.\n//\n// 8. After you get it working properly with CBLAS, download dbacon.f from NETLIB, and use\n//    f2c to convert it to C. Clean it up so it's readable. Remove the extra indices -- f2c\n//    inserts a lot of unnecessary stuff.\n//\n//    Copy and paste the output into the default templated function you wrote in Step 1.\n//    Fix it so it works as a template instead of just for doubles.\n//\n//    Because of step 3, this will automatically also work for the nmatrix-atlas\n//    and nmatrix-lapacke implementations.\n//\n// 9. Write tests to confirm that it works for all data types.\n//\n// 10. See about adding a Ruby-like interface, such as matrix_matrix_multiply for cblas_gemm,\n//    or matrix_vector_multiply for cblas_gemv. This step is not mandatory.\n//\n// 11. 
Pull request!\n\n/*\n * Project Includes\n */\n\n\n#include <ruby.h>\n#include <algorithm>\n#include <limits>\n#include <cmath>\n\n#include \"math/cblas_enums.h\"\n\n#include \"data/data.h\"\n#include \"math/magnitude.h\"\n#include \"math/imax.h\"\n#include \"math/scal.h\"\n#include \"math/laswp.h\"\n#include \"math/trsm.h\"\n#include \"math/gemm.h\"\n#include \"math/gemv.h\"\n#include \"math/asum.h\"\n#include \"math/nrm2.h\"\n#include \"math/getrf.h\"\n#include \"math/getrs.h\"\n#include \"math/rot.h\"\n#include \"math/rotg.h\"\n#include \"math/math.h\"\n#include \"math/util.h\"\n#include \"storage/dense/dense.h\"\n\n#include \"nmatrix.h\"\n#include \"ruby_constants.h\"\n\n/*\n * Forward Declarations\n */\n\nextern \"C\" {\n  /* BLAS Level 1. */\n  static VALUE nm_cblas_scal(VALUE self, VALUE n, VALUE scale, VALUE vector, VALUE incx);\n  static VALUE nm_cblas_nrm2(VALUE self, VALUE n, VALUE x, VALUE incx);\n  static VALUE nm_cblas_asum(VALUE self, VALUE n, VALUE x, VALUE incx);\n  static VALUE nm_cblas_rot(VALUE self, VALUE n, VALUE x, VALUE incx, VALUE y, VALUE incy, VALUE c, VALUE s);\n  static VALUE nm_cblas_rotg(VALUE self, VALUE ab);\n  static VALUE nm_cblas_imax(VALUE self, VALUE n, VALUE x, VALUE incx);\n\n  /* BLAS Level 2. */\n  static VALUE nm_cblas_gemv(VALUE self, VALUE trans_a, VALUE m, VALUE n, VALUE vAlpha, VALUE a, VALUE lda,\n                             VALUE x, VALUE incx, VALUE vBeta, VALUE y, VALUE incy);\n\n  /* BLAS Level 3. */\n  static VALUE nm_cblas_gemm(VALUE self, VALUE order, VALUE trans_a, VALUE trans_b, VALUE m, VALUE n, VALUE k, VALUE vAlpha,\n                             VALUE a, VALUE lda, VALUE b, VALUE ldb, VALUE vBeta, VALUE c, VALUE ldc);\n  static VALUE nm_cblas_trsm(VALUE self, VALUE order, VALUE side, VALUE uplo, VALUE trans_a, VALUE diag, VALUE m, VALUE n,\n                             VALUE vAlpha, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n\n  /* LAPACK. 
*/\n  static VALUE nm_has_clapack(VALUE self);\n  static VALUE nm_clapack_getrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda);\n  static VALUE nm_clapack_getrs(VALUE self, VALUE order, VALUE trans, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE ipiv, VALUE b, VALUE ldb);\n  static VALUE nm_clapack_laswp(VALUE self, VALUE n, VALUE a, VALUE lda, VALUE k1, VALUE k2, VALUE ipiv, VALUE incx);\n} // end of extern \"C\" block\n\n////////////////////\n// Math Functions //\n////////////////////\n\nnamespace nm {\n  namespace math {\n\n    /*\n     * Calculate the determinant for a dense matrix (A [elements]) of size 2 or 3. Return the result.\n     */\n    template <typename DType>\n    void det_exact_from_dense(const int M, const void* A_elements, const int lda, void* result_arg) {\n      DType* result  = reinterpret_cast<DType*>(result_arg);\n      const DType* A = reinterpret_cast<const DType*>(A_elements);\n\n      typename LongDType<DType>::type x, y;\n\n      if (M == 2) {\n        *result = A[0] * A[lda+1] - A[1] * A[lda];\n      } else if (M == 3) {\n        x = A[lda+1] * A[2*lda+2] - A[lda+2] * A[2*lda+1]; // ei - fh\n        y = A[lda] * A[2*lda+2] -   A[lda+2] * A[2*lda];   // fg - di\n        x = A[0]*x - A[1]*y ; // a*(ei-fh) - b*(fg-di)\n\n        y = A[lda] * A[2*lda+1] - A[lda+1] * A[2*lda];    // dh - eg\n        *result = A[2]*y + x; // c*(dh-eg) + _\n      } else if (M < 2) {\n        rb_raise(rb_eArgError, \"can only calculate exact determinant of a square matrix of size 2 or larger\");\n      } else {\n        rb_raise(rb_eNotImpError, \"exact determinant calculation needed for matrices larger than 3x3\");\n      }\n    }\n\n    //we can't do det_exact on byte, because it will want to return a byte (unsigned), but determinants can be negative, even if all elements of the matrix are positive\n    template <>\n    void det_exact_from_dense<uint8_t>(const int M, const void* A_elements, const int lda, void* result_arg) {\n      
rb_raise(nm_eDataTypeError, \"cannot call det_exact on unsigned type\");\n    }\n    /*\n     * Calculate the determinant for a yale matrix (storage) of size 2 or 3. Return the result.\n     */\n    template <typename DType>\n    void det_exact_from_yale(const int M, const YALE_STORAGE* storage, const int lda, void* result_arg) {\n      DType* result  = reinterpret_cast<DType*>(result_arg);\n      IType* ija = reinterpret_cast<IType *>(storage->ija);\n      DType* a = reinterpret_cast<DType*>(storage->a);\n      IType col_pos = storage->shape[0] + 1;\n      if (M == 2) {\n        if (ija[2] - ija[0] == 2) {\n          *result = a[0] * a[1] - a[col_pos] * a[col_pos+1];\n        }\n        else { *result = a[0] * a[1]; }\n      } else if (M == 3) {\n        DType m[3][3];\n        for (int i = 0; i < 3; ++i) {\n          m[i][i] = a[i];\n          switch(ija[i+1] - ija[i]) {\n          case 2:\n            m[i][ija[col_pos]] = a[col_pos];\n            m[i][ija[col_pos+1]] = a[col_pos+1];\n            col_pos += 2;\n            break;\n          case 1:\n            m[i][(i+1)%3] = m[i][(i+2)%3] = 0;\n            m[i][ija[col_pos]] = a[col_pos];\n            ++col_pos;\n            break;\n          case 0:\n            m[i][(i+1)%3] = m[i][(i+2)%3] = 0;\n            break;\n          default:\n            rb_raise(rb_eArgError, \"some value in IJA is incorrect!\");\n          }\n        }\n        *result =\n          m[0][0] * m[1][1] * m[2][2] + m[0][1] * m[1][2] * m[2][0] + m[0][2] * m[1][0] * m[2][1]\n        - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[0][2] * m[1][1] * m[2][0];\n\n      } else if (M < 2) {\n        rb_raise(rb_eArgError, \"can only calculate exact determinant of a square matrix of size 2 or larger\");\n      } else {\n        rb_raise(rb_eNotImpError, \"exact determinant calculation needed for matrices larger than 3x3\");\n      }\n    }\n\n    /*\n     * Solve a system of linear equations using forward-substution followed 
by\n     * back substution from the LU factorization of the matrix of co-efficients.\n     * Replaces x_elements with the result. Works only with non-integer, non-object\n     * data types.\n     *\n     * args - r           -> The number of rows of the matrix.\n     *        lu_elements -> Elements of the LU decomposition of the co-efficients\n     *                       matrix, as a contiguos array.\n     *        b_elements  -> Elements of the the right hand sides, as a contiguous array.\n     *        x_elements  -> The array that will contain the results of the computation.\n     *        pivot       -> Positions of permuted rows.\n     */\n    template <typename DType>\n    void solve(const int r, const void* lu_elements, const void* b_elements, void* x_elements, const int* pivot) {\n      int ii = 0, ip;\n      DType sum;\n\n      const DType* matrix = reinterpret_cast<const DType*>(lu_elements);\n      const DType* b      = reinterpret_cast<const DType*>(b_elements);\n      DType* x            = reinterpret_cast<DType*>(x_elements);\n\n      for (int i = 0; i < r; ++i) { x[i] = b[i]; }\n      for (int i = 0; i < r; ++i) { // forward substitution loop\n        ip = pivot[i];\n        sum = x[ip];\n        x[ip] = x[i];\n\n        if (ii != 0) {\n          for (int j = ii - 1;j < i; ++j) { sum = sum - matrix[i * r + j] * x[j]; }\n        }\n        else if (sum != 0.0) {\n          ii = i + 1;\n        }\n        x[i] = sum;\n      }\n\n      for (int i = r - 1; i >= 0; --i) { // back substitution loop\n        sum = x[i];\n        for (int j = i + 1; j < r; j++) { sum = sum - matrix[i * r + j] * x[j]; }\n        x[i] = sum/matrix[i * r + i];\n      }\n    }\n\n    /*\n     * Calculates in-place inverse of A_elements. 
Uses Gauss-Jordan elimination technique.\n     * In-place inversion of the matrix saves on memory and time.\n     *\n     * args - M - Shape of the matrix\n     *        a_elements - A duplicate of the original expressed as a contiguos array\n     */\n    template <typename DType>\n    void inverse(const int M, void* a_elements) {\n      DType* matrix   = reinterpret_cast<DType*>(a_elements);\n      int row_index[M]; // arrays for keeping track of column scrambling\n      int col_index[M];\n\n      for (int k = 0;k < M; ++k) {\n        typename MagnitudeDType<DType>::type akk;\n        akk = magnitude( matrix[k * (M + 1)] ); // diagonal element\n\n        int interchange = k;\n\n        for (int row = k + 1; row < M; ++row) {\n          typename MagnitudeDType<DType>::type big;\n          big = magnitude( matrix[M*row + k] ); // element below the temp pivot\n\n          if ( big > akk ) {\n            interchange = row;\n            akk = big;\n          }\n        }\n\n        if (interchange != k) { // check if rows need flipping\n          DType temp;\n\n          for (int col = 0; col < M; ++col) {\n            NM_SWAP(matrix[interchange*M + col], matrix[k*M + col], temp);\n          }\n        }\n\n        row_index[k] = interchange;\n        col_index[k] = k;\n\n        if (matrix[k * (M + 1)] == (DType)(0)) {\n          rb_raise(rb_eZeroDivError, \"Expected Non-Singular Matrix.\");\n        }\n\n        DType pivot = matrix[k * (M + 1)];\n        matrix[k * (M + 1)] = (DType)(1); // set diagonal as 1 for in-place inversion\n\n        for (int col = 0; col < M; ++col) {\n          // divide each element in the kth row with the pivot\n          matrix[k*M + col] = matrix[k*M + col] / pivot;\n        }\n\n        for (int kk = 0; kk < M; ++kk) { // iterate and reduce all rows\n          if (kk == k) continue;\n\n          DType dum = matrix[k + M*kk];\n          matrix[k + M*kk] = (DType)(0); // prepare for inplace inversion\n          for (int col = 0; col < 
M; ++col) {\n            matrix[M*kk + col] = matrix[M*kk + col] - matrix[M*k + col] * dum;\n          }\n        }\n      }\n\n      // Unscramble columns\n      DType temp;\n\n      for (int k = M - 1; k >= 0; --k) {\n        if (row_index[k] != col_index[k]) {\n\n          for (int row = 0; row < M; ++row) {\n            NM_SWAP(matrix[row * M + row_index[k]], matrix[row * M + col_index[k]],\n              temp);\n          }\n        }\n      }\n    }\n\n    /*\n     * Reduce a square matrix to hessenberg form with householder transforms\n     *\n     * == Arguments\n     *\n     * nrows - The number of rows present in matrix a.\n     * a_elements - Elements of the matrix to be reduced in 1D array form.\n     *\n     * == References\n     *\n     * http://www.mymathlib.com/c_source/matrices/eigen/hessenberg_orthog.c\n     * This code has been included by permission of the author.\n     */\n    template <typename DType>\n    void hessenberg(const int nrows, void* a_elements) {\n      DType* a = reinterpret_cast<DType*>(a_elements);\n      DType* u = new DType[nrows]; // auxillary storage for the chosen vector\n      DType sum_of_squares, *p_row, *psubdiag, *p_a, scale, innerproduct;\n      int i, k, col;\n\n      // For each column use a Householder transformation to zero all entries\n      // below the subdiagonal.\n      for (psubdiag = a + nrows, col = 0; col < nrows - 2; psubdiag += nrows + 1,\n        col++) {\n        // Calculate the signed square root of the sum of squares of the\n        // elements below the diagonal.\n\n        for (p_a = psubdiag, sum_of_squares = 0.0, i = col + 1; i < nrows;\n          p_a += nrows, i++) {\n          sum_of_squares += *p_a * *p_a;\n        }\n        if (sum_of_squares == 0.0) { continue; }\n        sum_of_squares = std::sqrt(sum_of_squares);\n\n        if ( *psubdiag >= 0.0 ) { sum_of_squares = -sum_of_squares; }\n\n        // Calculate the Householder transformation Q = I - 2uu'/u'u.\n        u[col + 1] = 
*psubdiag - sum_of_squares;\n        *psubdiag = sum_of_squares;\n\n        for (p_a = psubdiag + nrows, i = col + 2; i < nrows; p_a += nrows, i++) {\n          u[i] = *p_a;\n          *p_a = 0.0;\n        }\n\n        // Premultiply A by Q\n        scale = -1.0 / (sum_of_squares * u[col+1]);\n        for (p_row = psubdiag - col, i = col + 1; i < nrows; i++) {\n          p_a = a + nrows * (col + 1) + i;\n          for (innerproduct = 0.0, k = col + 1; k < nrows; p_a += nrows, k++) {\n            innerproduct += u[k] * *p_a;\n          }\n          innerproduct *= scale;\n          for (p_a = p_row + i, k = col + 1; k < nrows; p_a += nrows, k++) {\n            *p_a -= u[k] * innerproduct;\n          }\n        }\n\n        // Postmultiply QA by Q\n        for (p_row = a, i = 0; i < nrows; p_row += nrows, i++) {\n          for (innerproduct = 0.0, k = col + 1; k < nrows; k++) {\n            innerproduct += u[k] * *(p_row + k);\n          }\n          innerproduct *= scale;\n\n          for (k = col + 1; k < nrows; k++) {\n            *(p_row + k) -= u[k] * innerproduct;\n          }\n        }\n      }\n\n      delete[] u;\n    }\n\n    void raise_not_invertible_error() {\n        rb_raise(nm_eNotInvertibleError,\n            \"matrix must have non-zero determinant to be invertible (not getting this error does not mean matrix is invertible if you're dealing with floating points)\");\n    }\n\n    /*\n     * Calculate the exact inverse for a dense matrix (A [elements]) of size 2 or 3. 
Places the result in B_elements.\n     */\n    template <typename DType>\n    void inverse_exact_from_dense(const int M, const void* A_elements,\n        const int lda, void* B_elements, const int ldb) {\n\n      const DType* A = reinterpret_cast<const DType*>(A_elements);\n      DType* B       = reinterpret_cast<DType*>(B_elements);\n\n      if (M == 2) {\n        DType det = A[0] * A[lda+1] - A[1] * A[lda];\n        if (det == 0) { raise_not_invertible_error(); }\n        B[0] = A[lda+1] / det;\n        B[1] = -A[1] / det;\n        B[ldb] = -A[lda] / det;\n        B[ldb+1] = A[0] / det;\n\n      } else if (M == 3) {\n        // Calculate the exact determinant.\n        DType det;\n        det_exact_from_dense<DType>(M, A_elements, lda, reinterpret_cast<void*>(&det));\n        if (det == 0) { raise_not_invertible_error(); }\n\n        B[0]      = (  A[lda+1] * A[2*lda+2] - A[lda+2] * A[2*lda+1]) / det; // A = ei - fh\n        B[1]      = (- A[1]     * A[2*lda+2] + A[2]     * A[2*lda+1]) / det; // D = -bi + ch\n        B[2]      = (  A[1]     * A[lda+2]   - A[2]     * A[lda+1])   / det; // G = bf - ce\n        B[ldb]    = (- A[lda]   * A[2*lda+2] + A[lda+2] * A[2*lda])   / det; // B = -di + fg\n        B[ldb+1]  = (  A[0]     * A[2*lda+2] - A[2]     * A[2*lda])   / det; // E = ai - cg\n        B[ldb+2]  = (- A[0]     * A[lda+2]   + A[2]     * A[lda])     / det; // H = -af + cd\n        B[2*ldb]  = (  A[lda]   * A[2*lda+1] - A[lda+1] * A[2*lda])   / det; // C = dh - eg\n        B[2*ldb+1]= ( -A[0]     * A[2*lda+1] + A[1]     * A[2*lda])   / det; // F = -ah + bg\n        B[2*ldb+2]= (  A[0]     * A[lda+1]   - A[1]     * A[lda])     / det; // I = ae - bd\n      } else if (M == 1) {\n        B[0] = 1 / A[0];\n      } else {\n        rb_raise(rb_eNotImpError, \"exact inverse calculation needed for matrices larger than 3x3\");\n      }\n    }\n\n    template <typename DType>\n    void inverse_exact_from_yale(const int M, const YALE_STORAGE* storage,\n        const int 
lda, YALE_STORAGE* inverse, const int ldb) {\n\n      // inverse is a clone of storage\n      const DType* a = reinterpret_cast<const DType*>(storage->a);\n      const IType* ija = reinterpret_cast<const IType *>(storage->ija);\n      DType* b       = reinterpret_cast<DType*>(inverse->a);\n      IType* ijb = reinterpret_cast<IType *>(inverse->ija);\n      IType col_pos = storage->shape[0] + 1;\n      // Calculate the exact determinant.\n      DType det;\n\n      if (M == 2) {\n        IType ndnz = ija[2] - ija[0];\n        if (ndnz == 2) {\n          det = a[0] * a[1] - a[col_pos] * a[col_pos+1];\n        }\n        else { det = a[0] * a[1]; }\n        if (det == 0) { raise_not_invertible_error(); }\n        b[0] = a[1] / det;\n        b[1] = a[0] / det;\n        if (ndnz == 2) {\n          b[col_pos] = -a[col_pos] / det;\n          b[col_pos+1] = -a[col_pos+1] / det;\n        }\n        else if (ndnz == 1) {\n          b[col_pos] = -a[col_pos] / det;\n        }\n\n      } else if (M == 3) {\n        DType *A = new DType[lda*3];\n        for (int i = 0; i < lda; ++i) {\n          A[i*3+i] = a[i];\n          switch (ija[i+1] - ija[i]) {\n          case 2:\n            A[i*3 + ija[col_pos]] = a[col_pos];\n            A[i*3 + ija[col_pos+1]] = a[col_pos+1];\n            col_pos += 2;\n            break;\n          case 1:\n            A[i*3 + (i+1)%3] = A[i*3 + (i+2)%3] = 0;\n            A[i*3 + ija[col_pos]] = a[col_pos];\n            col_pos += 1;\n            break;\n          case 0:\n            A[i*3 + (i+1)%3] = A[i*3 + (i+2)%3] = 0;\n            break;\n          default:\n            rb_raise(rb_eArgError, \"some value in IJA is incorrect!\");\n          }\n        }\n        det =\n          A[0] * A[lda+1] * A[2*lda+2] + A[1] * A[lda+2] * A[2*lda] + A[2] * A[lda] * A[2*lda+1]\n        - A[0] * A[lda+2] * A[2*lda+1] - A[1] * A[lda] * A[2*lda+2] - A[2] * A[lda+1] * A[2*lda];\n        if (det == 0) { raise_not_invertible_error(); }\n\n        DType *B = new 
DType[3*ldb];\n        B[0]      = (  A[lda+1] * A[2*lda+2] - A[lda+2] * A[2*lda+1]) / det; // A = ei - fh\n        B[1]      = (- A[1]     * A[2*lda+2] + A[2]     * A[2*lda+1]) / det; // D = -bi + ch\n        B[2]      = (  A[1]     * A[lda+2]   - A[2]     * A[lda+1])   / det; // G = bf - ce\n        B[ldb]    = (- A[lda]   * A[2*lda+2] + A[lda+2] * A[2*lda])   / det; // B = -di + fg\n        B[ldb+1]  = (  A[0]     * A[2*lda+2] - A[2]     * A[2*lda])   / det; // E = ai - cg\n        B[ldb+2]  = (- A[0]     * A[lda+2]   + A[2]     * A[lda])     / det; // H = -af + cd\n        B[2*ldb]  = (  A[lda]   * A[2*lda+1] - A[lda+1] * A[2*lda])   / det; // C = dh - eg\n        B[2*ldb+1]= ( -A[0]     * A[2*lda+1] + A[1]     * A[2*lda])   / det; // F = -ah + bg\n        B[2*ldb+2]= (  A[0]     * A[lda+1]   - A[1]     * A[lda])     / det; // I = ae - bd\n\n        // Calculate the size of ijb and b, then reallocate them.\n        IType ndnz = 0;\n        for (int i = 0; i < 3; ++i) {\n          for (int j = 0; j < 3; ++j) {\n            if (j != i && B[i*ldb + j] != 0) { ++ndnz; }\n          }\n        }\n        inverse->ndnz = ndnz;\n        col_pos = 4; // shape[0] + 1\n        inverse->capacity = 4 + ndnz;\n        NM_REALLOC_N(inverse->a, DType, 4 + ndnz);\n        NM_REALLOC_N(inverse->ija, IType, 4 + ndnz);\n        b = reinterpret_cast<DType*>(inverse->a);\n        ijb = reinterpret_cast<IType *>(inverse->ija);\n\n        for (int i = 0; i < 3; ++i) {\n          ijb[i] = col_pos;\n          for (int j = 0; j < 3; ++j) {\n            if (j == i) {\n              b[i] = B[i*ldb + j];\n            }\n            else if (B[i*ldb + j] != 0) {\n              b[col_pos] = B[i*ldb + j];\n              ijb[col_pos] = j;\n              ++col_pos;\n            }\n          }\n        }\n        b[3] = 0;\n        ijb[3] = col_pos;\n        delete [] B;\n        delete [] A;\n      } else if (M == 1) {\n        b[0] = 1 / a[0];\n      } else {\n        rb_raise(rb_eNotImpError, 
\"exact inverse calculation needed for matrices larger than 3x3\");\n      }\n    }\n\n    /*\n     * Function signature conversion for calling CBLAS' gemm functions as directly as possible.\n     *\n     * For documentation: http://www.netlib.org/blas/dgemm.f\n     */\n    template <typename DType>\n    inline static void cblas_gemm(const enum CBLAS_ORDER order,\n                                  const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,\n                                  int m, int n, int k,\n                                  void* alpha,\n                                  void* a, int lda,\n                                  void* b, int ldb,\n                                  void* beta,\n                                  void* c, int ldc)\n    {\n      gemm<DType>(order, trans_a, trans_b, m, n, k, reinterpret_cast<DType*>(alpha),\n                  reinterpret_cast<DType*>(a), lda,\n                  reinterpret_cast<DType*>(b), ldb, reinterpret_cast<DType*>(beta),\n                  reinterpret_cast<DType*>(c), ldc);\n    }\n\n\n    /*\n     * Function signature conversion for calling CBLAS's gemv functions as directly as possible.\n     *\n     * For documentation: http://www.netlib.org/lapack/double/dgetrf.f\n     */\n    template <typename DType>\n    inline static bool cblas_gemv(const enum CBLAS_TRANSPOSE trans,\n                                  const int m, const int n,\n                                  const void* alpha,\n                                  const void* a, const int lda,\n                                  const void* x, const int incx,\n                                  const void* beta,\n                                  void* y, const int incy)\n    {\n      return gemv<DType>(trans,\n                         m, n, reinterpret_cast<const DType*>(alpha),\n                         reinterpret_cast<const DType*>(a), lda,\n                         reinterpret_cast<const DType*>(x), incx, reinterpret_cast<const 
DType*>(beta),\n                         reinterpret_cast<DType*>(y), incy);\n    }\n\n\n    /*\n     * Function signature conversion for calling CBLAS' trsm functions as directly as possible.\n     *\n     * For documentation: http://www.netlib.org/blas/dtrsm.f\n     */\n    template <typename DType>\n    inline static void cblas_trsm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,\n                                   const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_DIAG diag,\n                                   const int m, const int n, const void* alpha, const void* a,\n                                   const int lda, void* b, const int ldb)\n    {\n      trsm<DType>(order, side, uplo, trans_a, diag, m, n, *reinterpret_cast<const DType*>(alpha),\n                  reinterpret_cast<const DType*>(a), lda, reinterpret_cast<DType*>(b), ldb);\n    }\n\n  }\n} // end of namespace nm::math\n\n\nextern \"C\" {\n\n///////////////////\n// Ruby Bindings //\n///////////////////\n\nvoid nm_math_init_blas() {\n  VALUE cNMatrix_Internal = rb_define_module_under(cNMatrix, \"Internal\");\n\n  rb_define_singleton_method(cNMatrix, \"has_clapack?\", (METHOD)nm_has_clapack, 0);\n\n  VALUE cNMatrix_Internal_LAPACK = rb_define_module_under(cNMatrix_Internal, \"LAPACK\");\n\n  /* ATLAS-CLAPACK Functions that are implemented internally */\n  rb_define_singleton_method(cNMatrix_Internal_LAPACK, \"clapack_getrf\", (METHOD)nm_clapack_getrf, 5);\n  rb_define_singleton_method(cNMatrix_Internal_LAPACK, \"clapack_getrs\", (METHOD)nm_clapack_getrs, 9);\n  rb_define_singleton_method(cNMatrix_Internal_LAPACK, \"clapack_laswp\", (METHOD)nm_clapack_laswp, 7);\n\n  VALUE cNMatrix_Internal_BLAS = rb_define_module_under(cNMatrix_Internal, \"BLAS\");\n\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_scal\", (METHOD)nm_cblas_scal, 4);\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_nrm2\", (METHOD)nm_cblas_nrm2, 3);\n  
rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_asum\", (METHOD)nm_cblas_asum, 3);\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_rot\",  (METHOD)nm_cblas_rot,  7);\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_rotg\", (METHOD)nm_cblas_rotg, 1);\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_imax\", (METHOD)nm_cblas_imax, 3);\n\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_gemm\", (METHOD)nm_cblas_gemm, 14);\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_gemv\", (METHOD)nm_cblas_gemv, 11);\n  rb_define_singleton_method(cNMatrix_Internal_BLAS, \"cblas_trsm\", (METHOD)nm_cblas_trsm, 12);\n}\n\n/*\n * call-seq:\n *     NMatrix::BLAS.cblas_scal(n, alpha, vector, inc) -> NMatrix\n *\n * BLAS level 1 function +scal+. Works with all dtypes.\n *\n * Scale +vector+ in-place by +alpha+ and also return it. The operation is as\n * follows:\n *  x <- alpha * x\n *\n * - +n+ -> Number of elements of +vector+.\n * - +alpha+ -> Scalar value used in the operation.\n * - +vector+ -> NMatrix of shape [n,1] or [1,n]. Modified in-place.\n * - +inc+ -> Increment used in the scaling function. 
Should generally be 1.\n */\nstatic VALUE nm_cblas_scal(VALUE self, VALUE n, VALUE alpha, VALUE vector, VALUE incx) {\n  nm::dtype_t dtype = NM_DTYPE(vector);\n\n  void* scalar = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, scalar);\n\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::cblas_scal, void, const int n,\n      const void* scalar, void* x, const int incx);\n\n  ttable[dtype](FIX2INT(n), scalar, NM_STORAGE_DENSE(vector)->elements,\n      FIX2INT(incx));\n\n  return vector;\n}\n\n/*\n * Call any of the cblas_xrotg functions as directly as possible.\n *\n * xROTG computes the elements of a Givens plane rotation matrix such that:\n *\n *  |  c s |   | a |   | r |\n *  | -s c | * | b | = | 0 |\n *\n * where r = +- sqrt( a**2 + b**2 ) and c**2 + s**2 = 1.\n *\n * The Givens plane rotation can be used to introduce zero elements into a matrix selectively.\n *\n * This function differs from most of the other raw BLAS accessors. Instead of\n * providing a, b, c, s as arguments, you should only provide a and b (the\n * inputs), and you should provide them as the first two elements of any dense\n * NMatrix type.\n *\n * The outputs [c,s] will be returned in a Ruby Array at the end; the input\n * NMatrix will also be modified in-place.\n *\n * This function, like the other cblas_ functions, does minimal type-checking.\n */\nstatic VALUE nm_cblas_rotg(VALUE self, VALUE ab) {\n  static void (*ttable[nm::NUM_DTYPES])(void* a, void* b, void* c, void* s) = {\n      NULL, NULL, NULL, NULL, NULL, // can't represent c and s as integers, so no point in having integer operations.\n      nm::math::cblas_rotg<float>,\n      nm::math::cblas_rotg<double>,\n      nm::math::cblas_rotg<nm::Complex64>,\n      nm::math::cblas_rotg<nm::Complex128>,\n      NULL //nm::math::cblas_rotg<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(ab);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return 
Qnil;\n\n  } else {\n    NM_CONSERVATIVE(nm_register_value(&self));\n    NM_CONSERVATIVE(nm_register_value(&ab));\n    void *pC = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n         *pS = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n\n    // extract A and B from the NVector (first two elements)\n    void* pA = NM_STORAGE_DENSE(ab)->elements;\n    void* pB = (char*)(NM_STORAGE_DENSE(ab)->elements) + DTYPE_SIZES[dtype];\n    // c and s are output\n\n    ttable[dtype](pA, pB, pC, pS);\n\n    VALUE result = rb_ary_new2(2);\n\n    if (dtype == nm::RUBYOBJ) {\n      rb_ary_store(result, 0, *reinterpret_cast<VALUE*>(pC));\n      rb_ary_store(result, 1, *reinterpret_cast<VALUE*>(pS));\n    } else {\n      rb_ary_store(result, 0, nm::rubyobj_from_cval(pC, dtype).rval);\n      rb_ary_store(result, 1, nm::rubyobj_from_cval(pS, dtype).rval);\n    }\n    NM_CONSERVATIVE(nm_unregister_value(&ab));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    return result;\n  }\n}\n\n\n/*\n * Call any of the cblas_xrot functions as directly as possible.\n *\n * xROT is a BLAS level 1 routine (taking two vectors) which applies a plane rotation.\n *\n * It's tough to find documentation on xROT. Here are what we think the arguments are for:\n *  * n     :: number of elements to consider in x and y\n *  * x     :: a vector (expects an NVector)\n *  * incx  :: stride of x\n *  * y     :: a vector (expects an NVector)\n *  * incy  :: stride of y\n *  * c     :: cosine of the angle of rotation\n *  * s     :: sine of the angle of rotation\n *\n * Note that c and s will be the same dtype as x and y, except when x and y are complex. If x and y are complex, c and s\n * will be float for Complex64 or double for Complex128.\n *\n * You probably don't want to call this function. Instead, why don't you try rot, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_cblas_rot(VALUE self, VALUE n, VALUE x, VALUE incx, VALUE y, VALUE incy, VALUE c, VALUE s) {\n  static void (*ttable[nm::NUM_DTYPES])(const int N, void*, const int, void*, const int, const void*, const void*) = {\n      NULL, NULL, NULL, NULL, NULL, // can't represent c and s as integers, so no point in having integer operations.\n      nm::math::cblas_rot<float,float>,\n      nm::math::cblas_rot<double,double>,\n      nm::math::cblas_rot<nm::Complex64,float>,\n      nm::math::cblas_rot<nm::Complex128,double>,\n      nm::math::cblas_rot<nm::RubyObject,nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(x);\n\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qfalse;\n  } else {\n    void *pC, *pS;\n\n    // We need to ensure the cosine and sine arguments are the correct dtype -- which may differ from the actual dtype.\n    if (dtype == nm::COMPLEX64) {\n      pC = NM_ALLOCA_N(float,1);\n      pS = NM_ALLOCA_N(float,1);\n      rubyval_to_cval(c, nm::FLOAT32, pC);\n      rubyval_to_cval(s, nm::FLOAT32, pS);\n    } else if (dtype == nm::COMPLEX128) {\n      pC = NM_ALLOCA_N(double,1);\n      pS = NM_ALLOCA_N(double,1);\n      rubyval_to_cval(c, nm::FLOAT64, pC);\n      rubyval_to_cval(s, nm::FLOAT64, pS);\n    } else {\n      pC = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n      pS = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n      rubyval_to_cval(c, dtype, pC);\n      rubyval_to_cval(s, dtype, pS);\n    }\n\n\n    ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), NM_STORAGE_DENSE(y)->elements, FIX2INT(incy), pC, pS);\n\n    return Qtrue;\n  }\n}\n\n\n/*\n * Call any of the cblas_xnrm2 functions as directly as possible.\n *\n * xNRM2 is a BLAS level 1 routine which calculates the 2-norm of an n-vector x.\n *\n * Arguments:\n *  * n     :: length of x, must be at least 0\n *  * x     :: 
pointer to first entry of input vector\n *  * incx  :: stride of x, must be POSITIVE (ATLAS says non-zero, but 3.8.4 code only allows positive)\n *\n * You probably don't want to call this function. Instead, why don't you try nrm2, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_cblas_nrm2(VALUE self, VALUE n, VALUE x, VALUE incx) {\n\n  static void (*ttable[nm::NUM_DTYPES])(const int N, const void* X, const int incX, void* sum) = {\n      NULL, NULL, NULL, NULL, NULL, // no help for integers\n      nm::math::cblas_nrm2<float32_t>,\n      nm::math::cblas_nrm2<float64_t>,\n      nm::math::cblas_nrm2<nm::Complex64>,\n      nm::math::cblas_nrm2<nm::Complex128>,\n      nm::math::cblas_nrm2<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype  = NM_DTYPE(x);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qnil;\n\n  } else {\n    // Determine the return dtype and allocate it\n    nm::dtype_t rdtype = dtype;\n    if      (dtype == nm::COMPLEX64)  rdtype = nm::FLOAT32;\n    else if (dtype == nm::COMPLEX128) rdtype = nm::FLOAT64;\n\n    void *Result = NM_ALLOCA_N(char, DTYPE_SIZES[rdtype]);\n\n    ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), Result);\n\n    return nm::rubyobj_from_cval(Result, rdtype).rval;\n  }\n}\n\n\n\n/*\n * Call any of the cblas_xasum functions as directly as possible.\n *\n * xASUM is a BLAS level 1 routine which calculates the sum of absolute values of the entries\n * of a vector x.\n *\n * Arguments:\n *  * n     :: length of x, must be at least 0\n *  * x     :: pointer to first entry of input vector\n *  * incx  :: stride of x, must be POSITIVE (ATLAS says non-zero, but 3.8.4 code only allows positive)\n *\n * You probably don't want to call this function. 
Instead, why don't you try asum, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_cblas_asum(VALUE self, VALUE n, VALUE x, VALUE incx) {\n\n  static void (*ttable[nm::NUM_DTYPES])(const int N, const void* X, const int incX, void* sum) = {\n      nm::math::cblas_asum<uint8_t>,\n      nm::math::cblas_asum<int8_t>,\n      nm::math::cblas_asum<int16_t>,\n      nm::math::cblas_asum<int32_t>,\n      nm::math::cblas_asum<int64_t>,\n      nm::math::cblas_asum<float32_t>,\n      nm::math::cblas_asum<float64_t>,\n      nm::math::cblas_asum<nm::Complex64>,\n      nm::math::cblas_asum<nm::Complex128>,\n      nm::math::cblas_asum<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype  = NM_DTYPE(x);\n\n  // Determine the return dtype and allocate it\n  nm::dtype_t rdtype = dtype;\n  if      (dtype == nm::COMPLEX64)  rdtype = nm::FLOAT32;\n  else if (dtype == nm::COMPLEX128) rdtype = nm::FLOAT64;\n\n  void *Result = NM_ALLOCA_N(char, DTYPE_SIZES[rdtype]);\n\n  ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), Result);\n\n  return nm::rubyobj_from_cval(Result, rdtype).rval;\n}\n\n/*\n * call-seq:\n *    NMatrix::BLAS.cblas_imax(n, vector, inc) -> Fixnum\n *\n * BLAS level 1 routine.\n *\n * Return the index of the largest element of +vector+.\n *\n * - +n+ -> Vector's size. Generally, you can use NMatrix#rows or NMatrix#cols.\n * - +vector+ -> A NMatrix of shape [n,1] or [1,n] with any dtype.\n * - +inc+ -> It's the increment used when searching. 
Use 1 except if you know\n *   what you're doing.\n */\nstatic VALUE nm_cblas_imax(VALUE self, VALUE n, VALUE x, VALUE incx) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::cblas_imax, int, const int n, const void* x, const int incx);\n\n  nm::dtype_t dtype = NM_DTYPE(x);\n\n  int index = ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx));\n\n  // Convert to Ruby's Int value.\n  return INT2FIX(index);\n}\n\n\n/* Call any of the cblas_xgemm functions as directly as possible.\n *\n * The cblas_xgemm functions (dgemm, sgemm, cgemm, and zgemm) define the following operation:\n *\n *    C = alpha*op(A)*op(B) + beta*C\n *\n * where op(X) is one of <tt>op(X) = X</tt>, <tt>op(X) = X**T</tt>, or the complex conjugate of X.\n *\n * Note that this will only work for dense matrices that are of types :float32, :float64, :complex64, and :complex128.\n * Other types are not implemented in BLAS, and while they exist in NMatrix, this method is intended only to\n * expose the ultra-optimized ATLAS versions.\n *\n * == Arguments\n * See: http://www.netlib.org/blas/dgemm.f\n *\n * You probably don't want to call this function. Instead, why don't you try gemm, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_cblas_gemm(VALUE self,\n                           VALUE order,\n                           VALUE trans_a, VALUE trans_b,\n                           VALUE m, VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::cblas_gemm, void, const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int m, int n, int k, void* alpha, void* a, int lda, void* b, int ldb, void* beta, void* c, int ldc);\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n       *pBeta  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, pAlpha);\n  rubyval_to_cval(beta, dtype, pBeta);\n\n  ttable[dtype](blas_order_sym(order), blas_transpose_sym(trans_a), blas_transpose_sym(trans_b), FIX2INT(m), FIX2INT(n), FIX2INT(k), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb), pBeta, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n\n  return c;\n}\n\n\n/* Call any of the cblas_xgemv functions as directly as possible.\n *\n * The cblas_xgemv functions (dgemv, sgemv, cgemv, and zgemv) define the following operation:\n *\n *    y = alpha*op(A)*x + beta*y\n *\n * where op(A) is one of <tt>op(A) = A</tt>, <tt>op(A) = A**T</tt>, or the complex conjugate of A.\n *\n * Note that this will only work for dense matrices that are of types :float32, :float64, :complex64, and :complex128.\n * Other types are not implemented in BLAS, and while they exist in NMatrix, this method is intended only to\n * expose the ultra-optimized ATLAS versions.\n *\n * == Arguments\n * See: http://www.netlib.org/blas/dgemm.f\n *\n * You probably don't want to call 
this function. Instead, why don't you try cblas_gemv, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_cblas_gemv(VALUE self,\n                           VALUE trans_a,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE x, VALUE incx,\n                           VALUE beta,\n                           VALUE y, VALUE incy)\n{\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::cblas_gemv, bool, const enum CBLAS_TRANSPOSE, const int, const int, const void*, const void*, const int, const void*, const int, const void*, void*, const int)\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n       *pBeta  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, pAlpha);\n  rubyval_to_cval(beta, dtype, pBeta);\n\n  return ttable[dtype](blas_transpose_sym(trans_a), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), pBeta, NM_STORAGE_DENSE(y)->elements, FIX2INT(incy)) ? 
Qtrue : Qfalse;\n}\n\n\nstatic VALUE nm_cblas_trsm(VALUE self,\n                           VALUE order,\n                           VALUE side, VALUE uplo,\n                           VALUE trans_a, VALUE diag,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_SIDE, const enum CBLAS_UPLO,\n                                        const enum CBLAS_TRANSPOSE, const enum CBLAS_DIAG,\n                                        const int m, const int n, const void* alpha, const void* a,\n                                        const int lda, void* b, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::cblas_trsm<float>,\n      nm::math::cblas_trsm<double>,\n      nm::math::cblas_trsm<nm::Complex64>,\n      nm::math::cblas_trsm<nm::Complex128>,\n      nm::math::cblas_trsm<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n\n    ttable[dtype](blas_order_sym(order), blas_side_sym(side), blas_uplo_sym(uplo), blas_transpose_sym(trans_a), blas_diag_sym(diag), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  return Qtrue;\n}\n\n/* Call any of the clapack_xgetrf functions as directly as possible.\n *\n * The clapack_getrf functions (dgetrf, sgetrf, cgetrf, and zgetrf) compute an LU factorization of a general M-by-N\n * matrix A using partial pivoting with row interchanges.\n *\n * The factorization has the form:\n *    A = P * L * U\n * where P is a permutation matrix, L is lower 
triangular with unit diagonal elements (lower trapezoidal if m > n),\n * and U is upper triangular (upper trapezoidal if m < n).\n *\n * This is the right-looking level 3 BLAS version of the algorithm.\n *\n * == Arguments\n * See: http://www.netlib.org/lapack/double/dgetrf.f\n * (You don't need argument 5; this is the value returned by this function.)\n *\n * You probably don't want to call this function. Instead, why don't you try clapack_getrf, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n *\n * Returns an array giving the pivot indices (normally these are argument #5).\n */\nstatic VALUE nm_clapack_getrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const int m, const int n, void* a, const int lda, int* ipiv) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::clapack_getrf<float>,\n      nm::math::clapack_getrf<double>,\n      nm::math::clapack_getrf<nm::Complex64>,\n      nm::math::clapack_getrf<nm::Complex128>,\n      nm::math::clapack_getrf<nm::RubyObject>\n  };\n\n  int M = FIX2INT(m),\n      N = FIX2INT(n);\n\n  // Allocate the pivot index array, which is of size MIN(M, N).\n  size_t ipiv_size = std::min(M,N);\n  int* ipiv = NM_ALLOCA_N(int, ipiv_size);\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    // Call either our version of getrf or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), ipiv);\n  }\n\n  // Result will be stored in a. 
We return ipiv as an array.\n  VALUE ipiv_array = rb_ary_new2(ipiv_size);\n  for (size_t i = 0; i < ipiv_size; ++i) {\n    rb_ary_store(ipiv_array, i, INT2FIX(ipiv[i]));\n  }\n\n  return ipiv_array;\n}\n\n\n/*\n * Call any of the clapack_xgetrs functions as directly as possible.\n */\nstatic VALUE nm_clapack_getrs(VALUE self, VALUE order, VALUE trans, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE ipiv, VALUE b, VALUE ldb) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N,\n                                       const int NRHS, const void* A, const int lda, const int* ipiv, void* B,\n                                       const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::clapack_getrs<float>,\n      nm::math::clapack_getrs<double>,\n      nm::math::clapack_getrs<nm::Complex64>,\n      nm::math::clapack_getrs<nm::Complex128>,\n      nm::math::clapack_getrs<nm::RubyObject>\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (!RB_TYPE_P(ipiv, T_ARRAY)) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n\n    // Call either our version of getrs or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(nrhs), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n                        ipiv_, NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  // b is both returned and modified directly in the argument list.\n  return b;\n}\n\n/*\n * Simple way to check from within Ruby code if clapack functions are available, 
without\n * having to wait around for an exception to be thrown.\n */\nstatic VALUE nm_has_clapack(VALUE self) {\n  return Qfalse;\n}\n\n/*\n * Call any of the clapack_xlaswp functions as directly as possible.\n *\n * Note that LAPACK's xlaswp functions accept a column-order matrix, but NMatrix uses row-order. Thus, n should be the\n * number of rows and lda should be the number of columns, no matter what it says in the documentation for dlaswp.f.\n */\nstatic VALUE nm_clapack_laswp(VALUE self, VALUE n, VALUE a, VALUE lda, VALUE k1, VALUE k2, VALUE ipiv, VALUE incx) {\n  static void (*ttable[nm::NUM_DTYPES])(const int n, void* a, const int lda, const int k1, const int k2, const int* ipiv, const int incx) = {\n      nm::math::clapack_laswp<uint8_t>,\n      nm::math::clapack_laswp<int8_t>,\n      nm::math::clapack_laswp<int16_t>,\n      nm::math::clapack_laswp<int32_t>,\n      nm::math::clapack_laswp<int64_t>,\n      nm::math::clapack_laswp<float>,\n      nm::math::clapack_laswp<double>,\n      nm::math::clapack_laswp<nm::Complex64>,\n      nm::math::clapack_laswp<nm::Complex128>,\n      nm::math::clapack_laswp<nm::RubyObject>\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (!RB_TYPE_P(ipiv, T_ARRAY)) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  // Call either our version of laswp or the LAPACK version.\n  ttable[NM_DTYPE(a)](FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), FIX2INT(k1), FIX2INT(k2), ipiv_, FIX2INT(incx));\n\n  // a is both returned and modified directly in the argument list.\n  return a;\n}\n\n\n/*\n * C accessor for calculating an exact determinant. 
Dense matrix version.\n */\nvoid nm_math_det_exact_from_dense(const int M, const void* elements, const int lda,\n        nm::dtype_t dtype, void* result) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::det_exact_from_dense, void, const int M,\n          const void* A_elements, const int lda, void* result_arg);\n\n  ttable[dtype](M, elements, lda, result);\n}\n\n/*\n * C accessor for calculating an exact determinant. Yale matrix version.\n */\nvoid nm_math_det_exact_from_yale(const int M, const YALE_STORAGE* storage, const int lda,\n        nm::dtype_t dtype, void* result) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::det_exact_from_yale, void, const int M,\n          const YALE_STORAGE* storage, const int lda, void* result_arg);\n\n  ttable[dtype](M, storage, lda, result);\n}\n\n/*\n * C accessor for solving a system of linear equations.\n */\nvoid nm_math_solve(VALUE lu, VALUE b, VALUE x, VALUE ipiv) {\n  int* pivot = new int[RARRAY_LEN(ipiv)];\n\n  for (int i = 0; i < RARRAY_LEN(ipiv); ++i) {\n    pivot[i] = FIX2INT(rb_ary_entry(ipiv, i));\n  }\n\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::solve, void, const int, const void*, const void*, void*, const int*);\n\n  ttable[NM_DTYPE(x)](NM_SHAPE0(b), NM_STORAGE_DENSE(lu)->elements,\n    NM_STORAGE_DENSE(b)->elements, NM_STORAGE_DENSE(x)->elements, pivot);\n}\n\n/*\n * C accessor for reducing a matrix to hessenberg form.\n */\nvoid nm_math_hessenberg(VALUE a) {\n  static void (*ttable[nm::NUM_DTYPES])(const int, void*) = {\n      NULL, NULL, NULL, NULL, NULL, // does not support ints\n      nm::math::hessenberg<float>,\n      nm::math::hessenberg<double>,\n      NULL, NULL, // does not support Complex\n      NULL // no support for Ruby Object\n  };\n\n  ttable[NM_DTYPE(a)](NM_SHAPE0(a), NM_STORAGE_DENSE(a)->elements);\n}\n/*\n * C accessor for calculating an in-place inverse.\n */\nvoid nm_math_inverse(const int M, void* a_elements, nm::dtype_t dtype) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, 
nm::math::inverse, void, const int, void*);\n\n  ttable[dtype](M, a_elements);\n}\n\n/*\n * C accessor for calculating an exact inverse. Dense matrix version.\n */\nvoid nm_math_inverse_exact_from_dense(const int M, const void* A_elements,\n    const int lda, void* B_elements, const int ldb, nm::dtype_t dtype) {\n\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::inverse_exact_from_dense, void,\n      const int, const void*, const int, void*, const int);\n\n  ttable[dtype](M, A_elements, lda, B_elements, ldb);\n}\n\n/*\n * C accessor for calculating an exact inverse. Yale matrix version.\n */\nvoid nm_math_inverse_exact_from_yale(const int M, const YALE_STORAGE* storage,\n    const int lda, YALE_STORAGE* inverse, const int ldb, nm::dtype_t dtype) {\n\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::inverse_exact_from_yale, void,\n      const int, const YALE_STORAGE*, const int, YALE_STORAGE*, const int);\n\n  ttable[dtype](M, storage, lda, inverse, ldb);\n}\n\n/*\n * Transpose an array of elements that represent a row-major dense matrix. Does not allocate anything, only does an memcpy.\n */\nvoid nm_math_transpose_generic(const size_t M, const size_t N, const void* A, const int lda, void* B, const int ldb, size_t element_size) {\n  for (size_t i = 0; i < N; ++i) {\n    for (size_t j = 0; j < M; ++j) {\n\n      memcpy(reinterpret_cast<char*>(B) + (i*ldb+j)*element_size,\n             reinterpret_cast<const char*>(A) + (j*lda+i)*element_size,\n             element_size);\n\n    }\n  }\n}\n\n\n} // end of extern \"C\" block\n"
  },
  {
    "path": "ext/nmatrix/nm_memory.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nm_memory.h\n//\n// Macros for memory allocation and freeing\n\n/**\n * We define these macros, which just call the ruby ones, as this makes\n * debugging memory issues (particularly those involving interaction with\n * the ruby GC) easier, as it's posssible to add debugging code temporarily.\n */\n#ifndef __NM_MEMORY_H__\n#define __NM_MEMORY_H__\n\n#include <ruby.h>\n\n#define NM_ALLOC(type) (ALLOC(type))\n\n#define NM_ALLOC_N(type, n) (ALLOC_N(type, n))\n\n#define NM_REALLOC_N(var, type, n) (REALLOC_N(var, type, n))\n\n#define NM_ALLOCA_N(type, n) (ALLOCA_N(type, n))\n\n#define NM_FREE(var) (xfree(var))\n\n#define NM_ALLOC_NONRUBY(type) ((type*) malloc(sizeof(type)))\n\n//Defines whether to do conservative gc registrations, i.e. those\n//registrations that we're not that sure are necessary.\n//#define NM_GC_CONSERVATIVE\n\n#ifdef NM_GC_CONSERVATIVE\n#define NM_CONSERVATIVE(statement) (statement)\n#else\n#define NM_CONSERVATIVE(statement)\n#endif //NM_GC_CONSERVATIVE\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/nmatrix.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nmatrix.cpp\n//\n// Main C++ source file for NMatrix. Contains Init_nmatrix and most Ruby\n// instance and class methods for NMatrix. Also responsible for calling Init\n// methods on related modules.\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <cfloat>\n#include <algorithm> // std::min\n#include <fstream>\n\n/*\n * Project Includes\n */\n#include \"nmatrix_config.h\"\n\n#include \"types.h\"\n#include \"data/data.h\"\n#include \"math/math.h\"\n#include \"util/io.h\"\n#include \"storage/storage.h\"\n#include \"storage/list/list.h\"\n#include \"storage/yale/yale.h\"\n\n#include \"nmatrix.h\"\n\n#include \"ruby_constants.h\"\n\n/*\n * Ruby internals\n */\n\n\n/*\n * Macros\n */\n\n\n/*\n * Global Variables\n */\n\nnamespace nm {\n\n  /*\n   * This function is pulled out separately so it can be called for hermitian matrix writing, which also uses it.\n   */\n  template <typename DType>\n  size_t write_padded_dense_elements_upper(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {\n    // Write upper triangular portion. 
Assume 2D square matrix.\n    DType* elements = reinterpret_cast<DType*>(storage->elements);\n    size_t length = storage->shape[0];\n\n    size_t bytes_written = 0;\n\n    for (size_t i = 0; i < length; ++i) { // which row are we on?\n\n      f.write( reinterpret_cast<const char*>( &(elements[ i*(length + 1) ]) ),\n               (length - i) * sizeof(DType) );\n\n      bytes_written += (length - i) * sizeof(DType);\n    }\n    return bytes_written;\n  }\n\n  /*\n   * We need to specialize for Hermitian matrices. The next six functions accomplish that specialization, basically\n   * by ensuring that non-complex matrices cannot read or write hermitians (which would cause big problems).\n   */\n  template <typename DType>\n  size_t write_padded_dense_elements_herm(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {\n    rb_raise(rb_eArgError, \"cannot write a non-complex matrix as hermitian\");\n  }\n\n  template <>\n  size_t write_padded_dense_elements_herm<Complex64>(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {\n    return write_padded_dense_elements_upper<Complex64>(f, storage, symm);\n  }\n\n  template <>\n  size_t write_padded_dense_elements_herm<Complex128>(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {\n    return write_padded_dense_elements_upper<Complex128>(f, storage, symm);\n  }\n\n  template <typename DType>\n  void read_padded_dense_elements_herm(DType* elements, size_t length) {\n    rb_raise(rb_eArgError, \"cannot read a non-complex matrix as hermitian\");\n  }\n\n  template <>\n  void read_padded_dense_elements_herm(Complex64* elements, size_t length) {\n    for (size_t i = 0; i < length; ++i) {\n      for (size_t j = i+1; j < length; ++j) {\n        elements[j * length + i] = elements[i * length + j].conjugate();\n      }\n    }\n  }\n\n  template <>\n  void read_padded_dense_elements_herm(Complex128* elements, size_t length) {\n    for (size_t i = 0; i < length; ++i) {\n      for (size_t j = i+1; j < length; ++j) {\n      
  elements[j * length + i] = elements[i * length + j].conjugate();\n      }\n    }\n  }\n\n  /*\n   * Read the elements of a dense storage matrix from a binary file, padded to 64-bits.\n   *\n   * storage should already be allocated. No initialization necessary.\n   */\n  template <typename DType>\n  void read_padded_dense_elements(std::ifstream& f, DENSE_STORAGE* storage, nm::symm_t symm) {\n    size_t bytes_read = 0;\n\n    if (symm == nm::NONSYMM) {\n      // Easy. Simply read the whole elements array.\n      size_t length = nm_storage_count_max_elements(reinterpret_cast<STORAGE*>(storage));\n      f.read(reinterpret_cast<char*>(storage->elements), length * sizeof(DType) );\n\n      bytes_read += length * sizeof(DType);\n    } else if (symm == LOWER) {\n\n      // Read lower triangular portion and initialize remainder to 0\n      DType* elements = reinterpret_cast<DType*>(storage->elements);\n      size_t length = storage->shape[0];\n\n      for (size_t i = 0; i < length; ++i) { // which row?\n\n        f.read( reinterpret_cast<char*>(&(elements[i * length])), (i + 1) * sizeof(DType) );\n\n        // need to zero-fill the rest of the row.\n        for (size_t j = i+1; j < length; ++j)\n          elements[i * length + j] = 0;\n\n        bytes_read += (i + 1) * sizeof(DType);\n      }\n    } else {\n\n      DType* elements = reinterpret_cast<DType*>(storage->elements);\n      size_t length = storage->shape[0];\n\n      for (size_t i = 0; i < length; ++i) { // which row?\n        f.read( reinterpret_cast<char*>(&(elements[i * (length + 1)])), (length - i) * sizeof(DType) );\n\n        bytes_read += (length - i) * sizeof(DType);\n      }\n\n      if (symm == SYMM) {\n        for (size_t i = 0; i < length; ++i) {\n          for (size_t j = i+1; j < length; ++j) {\n            elements[j * length + i] = elements[i * length + j];\n          }\n        }\n      } else if (symm == SKEW) {\n        for (size_t i = 0; i < length; ++i) {\n          for (size_t j = i+1; j < 
length; ++j) {\n            elements[j * length + i] = -elements[i * length + j];\n          }\n        }\n      } else if (symm == HERM) {\n        read_padded_dense_elements_herm<DType>(elements, length);\n\n      } else if (symm == UPPER) { // zero-fill the rest of the rows\n        for (size_t i = 0; i < length; ++i) {\n          for(size_t j = i+1; j < length; ++j) {\n            elements[j * length + i] = 0;\n          }\n        }\n      }\n\n    }\n\n    // Ignore any padding.\n    if (bytes_read % 8) f.ignore(bytes_read % 8);\n  }\n\n  template <typename DType>\n  void write_padded_yale_elements(std::ofstream& f, YALE_STORAGE* storage, size_t length, nm::symm_t symm) {\n    if (symm != nm::NONSYMM) rb_raise(rb_eNotImpError, \"Yale matrices can only be read/written in full form\");\n\n    // Keep track of bytes written for each of A and IJA so we know how much padding to use.\n    size_t bytes_written = length * sizeof(DType);\n\n    // Write A array\n    f.write(reinterpret_cast<const char*>(storage->a), bytes_written);\n\n    // Padding\n    int64_t zero = 0;\n    f.write(reinterpret_cast<const char*>(&zero), bytes_written % 8);\n\n    bytes_written = length * sizeof(IType);\n    f.write(reinterpret_cast<const char*>(storage->ija), bytes_written);\n\n    // More padding\n    f.write(reinterpret_cast<const char*>(&zero), bytes_written % 8);\n  }\n\n\n  template <typename DType>\n  void read_padded_yale_elements(std::ifstream& f, YALE_STORAGE* storage, size_t length, nm::symm_t symm) {\n    if (symm != NONSYMM) rb_raise(rb_eNotImpError, \"Yale matrices can only be read/written in full form\");\n\n    size_t bytes_read = length * sizeof(DType);\n    f.read(reinterpret_cast<char*>(storage->a), bytes_read);\n\n    int64_t padding = 0;\n    f.read(reinterpret_cast<char*>(&padding), bytes_read % 8);\n\n    bytes_read = length * sizeof(IType);\n    f.read(reinterpret_cast<char*>(storage->ija), bytes_read);\n\n    f.read(reinterpret_cast<char*>(&padding), 
bytes_read % 8);\n  }\n\n  /*\n   * Write the elements of a dense storage matrix to a binary file, padded to 64-bits.\n   */\n  template <typename DType>\n  void write_padded_dense_elements(std::ofstream& f, DENSE_STORAGE* storage, nm::symm_t symm) {\n    size_t bytes_written = 0;\n\n    if (symm == nm::NONSYMM) {\n      // Simply write the whole elements array.\n      size_t length = nm_storage_count_max_elements(storage);\n      f.write(reinterpret_cast<const char*>(storage->elements), length * sizeof(DType));\n\n      bytes_written += length * sizeof(DType);\n\n    } else if (symm == nm::LOWER) {\n\n      // Write lower triangular portion. Assume 2D square matrix.\n      DType* elements = reinterpret_cast<DType*>(storage->elements);\n      size_t length = storage->shape[0];\n      for (size_t i = 0; i < length; ++i) { // which row?\n\n        f.write( reinterpret_cast<const char*>( &(elements[i * length]) ),\n                 (i + 1) * sizeof(DType) );\n\n        bytes_written += (i + 1) * sizeof(DType);\n      }\n    } else if (symm == nm::HERM) {\n      bytes_written += write_padded_dense_elements_herm<DType>(f, storage, symm);\n    } else { // HERM, UPPER, SYMM, SKEW\n      bytes_written += write_padded_dense_elements_upper<DType>(f, storage, symm);\n    }\n\n    // Padding\n    int64_t zero = 0;\n    f.write(reinterpret_cast<const char*>(&zero), bytes_written % 8);\n  }\n\n} // end of namespace nm\n\nextern \"C\" {\n  #include \"ruby_nmatrix.c\"\n} // end of extern \"C\"\n"
  },
  {
    "path": "ext/nmatrix/nmatrix.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nmatrix.h\n//\n// C and C++ API for NMatrix, and main header file.\n\n#ifndef NMATRIX_H\n#define NMATRIX_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include \"ruby_constants.h\"\n\n#ifdef __cplusplus\n  #include <cmath>\n  #include <cstring>\n#else\n  #include <math.h>\n  #include <string.h>\n#endif\n\n#ifdef BENCHMARK\n  // SOURCE: http://stackoverflow.com/questions/2349776/how-can-i-benchmark-a-c-program-easily\n  #ifdef __cplusplus\n    #include <sys/ctime>\n    #include <sys/cresource>\n  #else\n    #include <sys/time.h>\n    #include <sys/resource.h>\n  #endif\n#endif\n\n#ifdef __cplusplus\n  #include \"nm_memory.h\"\n#endif\n\n#ifndef RB_BUILTIN_TYPE\n# define RB_BUILTIN_TYPE(obj) BUILTIN_TYPE(obj)\n#endif\n\n#ifndef RB_FLOAT_TYPE_P\n/* NOTE: assume flonum doesn't exist */\n# define RB_FLOAT_TYPE_P(obj) ( \\\n    (!SPECIAL_CONST_P(obj) && BUILTIN_TYPE(obj) == T_FLOAT))\n#endif\n\n#ifndef RB_TYPE_P\n# define RB_TYPE_P(obj, type) ( \\\n    ((type) == T_FIXNUM) ? FIXNUM_P(obj) : \\\n    ((type) == T_TRUE) ? ((obj) == Qtrue) : \\\n    ((type) == T_FALSE) ? ((obj) == Qfalse) : \\\n    ((type) == T_NIL) ? ((obj) == Qnil) : \\\n    ((type) == T_UNDEF) ? 
((obj) == Qundef) : \\\n    ((type) == T_SYMBOL) ? SYMBOL_P(obj) : \\\n    ((type) == T_FLOAT) ? RB_FLOAT_TYPE_P(obj) : \\\n    (!SPECIAL_CONST_P(obj) && BUILTIN_TYPE(obj) == (type)))\n#endif\n\n#ifndef FIX_CONST_VALUE_PTR\n# if defined(__fcc__) || defined(__fcc_version) || \\\n    defined(__FCC__) || defined(__FCC_VERSION)\n/* workaround for old version of Fujitsu C Compiler (fcc) */\n#  define FIX_CONST_VALUE_PTR(x) ((const VALUE *)(x))\n# else\n#  define FIX_CONST_VALUE_PTR(x) (x)\n# endif\n#endif\n\n#ifndef HAVE_RB_ARRAY_CONST_PTR\nstatic inline const VALUE *\nrb_array_const_ptr(VALUE a)\n{\n  return FIX_CONST_VALUE_PTR((RBASIC(a)->flags & RARRAY_EMBED_FLAG) ?\n    RARRAY(a)->as.ary : RARRAY(a)->as.heap.ptr);\n}\n#endif\n\n#ifndef RARRAY_CONST_PTR\n# define RARRAY_CONST_PTR(a) rb_array_const_ptr(a)\n#endif\n\n#ifndef RARRAY_AREF\n# define RARRAY_AREF(a, i) (RARRAY_CONST_PTR(a)[i])\n#endif\n\n/*\n * Macros\n */\n\n#define RUBY_ZERO INT2FIX(0)\n\n#ifndef SIZEOF_INT\n  #error SIZEOF_INT undefined\n#else\n  #if SIZEOF_INT == 8\n    #define DEFAULT_DTYPE  INT64\n    #define SIZE_T         INT64\n  #else\n    #if SIZEOF_INT == 4\n      #define DEFAULT_DTYPE INT32\n      #define SIZE_T        INT32\n    #else\n      #if SIZEOF_INT == 2\n        #define DEFAULT_DTYPE INT16\n        #define SIZE_T        INT16\n      #else\n        #error Unhandled SIZEOF_INT -- please #define SIZE_T and DEFAULT_DTYPE manually.\n      #endif\n    #endif\n  #endif\n#endif\n\n/*\n * == Macros for Concurrent C and C++ Header Maintenance\n *\n * These macros look complicated, but they're really not so bad. They're also important: they ensure that whether our\n * header file (nmatrix.h) is read by a C++ or a C compiler, all the same data structures and enumerators exist, albeit\n * with slightly different names.\n *\n * \"But wait,\" you say, \"You use structs. Structs exist in C and C++. Why use a macro to set them up?\"\n *\n * Well, in C, you have to be explicit about what a struct is. 
You can actually get around that requirement by using a\n * typedef:\n *\n *   typedef struct STORAGE { ... } STORAGE;\n *\n * Also, we use C++ inheritance, which is obviously not allowed in C. So we have to ensure that the base class's members\n * are exposed properly to our child classes.\n *\n * The macros also allow us to put all of our C++ types into namespaces. For C, we prefix everything with either nm_ or\n * NM_ to distinguish our declarations from those in other libraries.\n */\n\n\n#ifdef __cplusplus /* These are the C++ versions of the macros. */\n\n  /*\n   * If no block is given, return an enumerator. This copied straight out of ruby's include/ruby/intern.h.\n   *\n   * rb_enumeratorize is located in enumerator.c.\n   *\n   *    VALUE rb_enumeratorize(VALUE obj, VALUE meth, int argc, VALUE *argv) {\n   *      return enumerator_init(enumerator_allocate(rb_cEnumerator), obj, meth, argc, argv);\n   *    }\n   */\n\n//opening portion -- this allows unregistering any objects in use before returning\n  #define RETURN_SIZED_ENUMERATOR_PRE do { \\\n    if (!rb_block_given_p()) {\n\n//remaining portion\n  #ifdef RUBY_2\n    #ifndef RETURN_SIZED_ENUMERATOR\n      #undef RETURN_SIZED_ENUMERATOR\n      // Ruby 2.0 and higher has rb_enumeratorize_with_size instead of rb_enumeratorize.\n      // We want to support both in the simplest way possible.\n      #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) \\\n        return rb_enumeratorize_with_size((obj), ID2SYM(rb_frame_this_func()), (argc), (argv), (size_fn));  \\\n      } \\\n    } while (0)\n    #endif\n  #else\n    #undef RETURN_SIZED_ENUMERATOR\n    #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) \\\n      return rb_enumeratorize((obj), ID2SYM(rb_frame_this_func()), (argc), (argv));   \\\n      } \\\n    } while (0)\n  #endif\n\n  #define NM_DECL_ENUM(enum_type, name)   nm::enum_type name\n  #define NM_DECL_STRUCT(type, name)      type          name;\n\n  #define NM_DEF_STORAGE_ELEMENTS    
\\\n    NM_DECL_ENUM(dtype_t, dtype);    \\\n    size_t      dim;                 \\\n    size_t*     shape;               \\\n    size_t*     offset;              \\\n    int         count;               \\\n    STORAGE*    src;\n\n  #define NM_DEF_STORAGE_CHILD_STRUCT_PRE(name)    struct name : STORAGE {\n  #define NM_DEF_STORAGE_STRUCT_POST(name)         };\n\n  #define NM_DEF_STORAGE_STRUCT      \\\n  struct STORAGE {                   \\\n    NM_DEF_STORAGE_ELEMENTS;         \\\n  };\n\n  #define NM_DEF_STRUCT_PRE(name)  struct name {\n  #define NM_DEF_STRUCT_POST(name) };\n\n  #define NM_DEF_ENUM(name, ...)          \\\n    namespace nm {                        \\\n      enum name {                         \\\n        __VA_ARGS__                       \\\n      };                                  \\\n    } // end of namespace nm\n\n#else   /* These are the C versions of the macros. */\n\n  #define NM_DECL_ENUM(enum_type, name)   nm_ ## enum_type name\n  #define NM_DECL_STRUCT(type, name)      struct NM_ ## type      name;\n\n  #define NM_DEF_STORAGE_ELEMENTS   \\\n    NM_DECL_ENUM(dtype_t, dtype);   \\\n    size_t      dim;                \\\n    size_t*     shape;              \\\n    size_t*     offset;             \\\n    int         count;              \\\n    NM_DECL_STRUCT(STORAGE*, src);\n  #define NM_DEF_STORAGE_CHILD_STRUCT_PRE(name)  typedef struct NM_ ## name { \\\n                                                   NM_DEF_STORAGE_ELEMENTS;\n\n  #define NM_DEF_STORAGE_STRUCT_POST(name)       } NM_ ## name;\n\n  #define NM_DEF_STORAGE_STRUCT      \\\n  typedef struct NM_STORAGE {        \\\n    NM_DEF_STORAGE_ELEMENTS;         \\\n  } NM_STORAGE;\n\n  #define NM_DEF_STRUCT_PRE(name)                typedef struct NM_ ## name {\n  #define NM_DEF_STRUCT_POST(name)               } NM_ ## name;\n\n  #define NM_DEF_ENUM(name, ...)     
\\\n    typedef enum nm_ ## name {       \\\n      __VA_ARGS__                    \\\n    } nm_ ## name;\n\n#endif      /* End of C/C++ Parallel Header Macro Definitions */\n\n\n/*\n * Types\n */\n\n#define NM_NUM_DTYPES 10  // data/data.h\n#define NM_NUM_STYPES 3   // storage/storage.h\n\n//#ifdef __cplusplus\n//namespace nm {\n//#endif\n\n/* Storage Type -- Dense or Sparse */\nNM_DEF_ENUM(stype_t,  DENSE_STORE = 0,\n                      LIST_STORE = 1,\n                      YALE_STORE = 2);\n\n/* Data Type */\nNM_DEF_ENUM(dtype_t,    BYTE                =  0,  // unsigned char\n                        INT8                =  1,  // char\n                        INT16               =  2,  // short\n                        INT32               =  3,  // int\n                        INT64               =  4,  // long\n                        FLOAT32         =  5,  // float\n                        FLOAT64         =  6,  // double\n                        COMPLEX64       =  7,  // Complex64 class\n                        COMPLEX128  =  8,  // Complex128 class\n                        RUBYOBJ         = 9);  // Ruby VALUE type\n\nNM_DEF_ENUM(symm_t,   NONSYMM   = 0,\n                      SYMM      = 1,\n                      SKEW      = 2,\n                      HERM      = 3,\n                      UPPER     = 4,\n                      LOWER     = 5);\n\n//#ifdef __cplusplus\n//}; // end of namespace nm\n//#endif\n\n/* struct STORAGE */\nNM_DEF_STORAGE_STRUCT;\n\n/* Dense Storage */\nNM_DEF_STORAGE_CHILD_STRUCT_PRE(DENSE_STORAGE); // struct DENSE_STORAGE : STORAGE {\n  void*     elements; // should go first to align with void* a in yale and NODE* first in list.\n  size_t*   stride;\nNM_DEF_STORAGE_STRUCT_POST(DENSE_STORAGE);     // };\n\n/* Yale Storage */\nNM_DEF_STORAGE_CHILD_STRUCT_PRE(YALE_STORAGE);\n  void*   a;      // should go first\n  size_t  ndnz; // Strictly non-diagonal non-zero count!\n  size_t  capacity;\n  size_t* 
ija;\nNM_DEF_STORAGE_STRUCT_POST(YALE_STORAGE);\n\n// FIXME: NODE and LIST should be put in some kind of namespace or something, at least in C++.\nNM_DEF_STRUCT_PRE(NODE); // struct NODE {\n  size_t key;\n  void*  val;\n  NM_DECL_STRUCT(NODE*, next);  // NODE* next;\nNM_DEF_STRUCT_POST(NODE); // };\n\nNM_DEF_STRUCT_PRE(LIST); // struct LIST {\n  NM_DECL_STRUCT(NODE*, first); // NODE* first;\nNM_DEF_STRUCT_POST(LIST); // };\n\n/* List-of-Lists Storage */\nNM_DEF_STORAGE_CHILD_STRUCT_PRE(LIST_STORAGE); // struct LIST_STORAGE : STORAGE {\n  // List storage specific elements.\n  void* default_val;\n  NM_DECL_STRUCT(LIST*, rows); // LIST* rows;\nNM_DEF_STORAGE_STRUCT_POST(LIST_STORAGE);      // };\n\n\n\n/* NMATRIX Object */\nNM_DEF_STRUCT_PRE(NMATRIX);   // struct NMATRIX {\n  NM_DECL_ENUM(stype_t, stype);       // stype_t stype;     // Method of storage (csc, dense, etc).\n  NM_DECL_STRUCT(STORAGE*, storage);  // STORAGE* storage;  // Pointer to storage struct.\nNM_DEF_STRUCT_POST(NMATRIX);  // };\n\n/* Structs for dealing with VALUEs in use so that they don't get GC'd */\n\nNM_DEF_STRUCT_PRE(NM_GC_LL_NODE);       // struct NM_GC_LL_NODE {\n  VALUE* val;                           //   VALUE* val;\n  size_t n;                             //   size_t n;\n  NM_DECL_STRUCT(NM_GC_LL_NODE*, next); //   NM_GC_LL_NODE* next;\nNM_DEF_STRUCT_POST(NM_GC_LL_NODE);      // };\n\nNM_DEF_STRUCT_PRE(NM_GC_HOLDER);        // struct NM_GC_HOLDER {\n  NM_DECL_STRUCT(NM_GC_LL_NODE*, start); //  NM_GC_LL_NODE* start;\nNM_DEF_STRUCT_POST(NM_GC_HOLDER);       // };\n\n#define NM_MAX_RANK 15\n\n#define UnwrapNMatrix(obj,var)  Data_Get_Struct(obj, NMATRIX, var)\n\n#define NM_STORAGE(val)         (NM_STRUCT(val)->storage)\n#ifdef __cplusplus\n  #define NM_STRUCT(val)              ((NMATRIX*)(DATA_PTR(val)))\n  #define NM_STORAGE_LIST(val)        ((LIST_STORAGE*)(NM_STORAGE(val)))\n  #define NM_STORAGE_YALE(val)        ((YALE_STORAGE*)(NM_STORAGE(val)))\n  #define NM_STORAGE_DENSE(val)       
((DENSE_STORAGE*)(NM_STORAGE(val)))\n#else\n  #define NM_STRUCT(val)              ((struct NM_NMATRIX*)(DATA_PTR(val)))\n  #define NM_STORAGE_LIST(val)        ((struct NM_LIST_STORAGE*)(NM_STORAGE(val)))\n  #define NM_STORAGE_YALE(val)        ((struct NM_YALE_STORAGE*)(NM_STORAGE(val)))\n  #define NM_STORAGE_DENSE(val)       ((struct NM_DENSE_STORAGE*)(NM_STORAGE(val)))\n#endif\n\n#define NM_SRC(val)             (NM_STORAGE(val)->src)\n#define NM_DIM(val)             (NM_STORAGE(val)->dim)\n\n// Returns an int corresponding to the data type of the nmatrix. See the dtype_t\n// enum for a list of possible data types.\n#define NM_DTYPE(val)           (NM_STORAGE(val)->dtype)\n\n// Returns a number corresponding to the storage type of the nmatrix. See the stype_t\n// enum for a list of possible storage types.\n#define NM_STYPE(val)           (NM_STRUCT(val)->stype)\n\n// Get the shape of the ith dimension (int)\n#define NM_SHAPE(val,i)         (NM_STORAGE(val)->shape[(i)])\n\n// Get the shape of the 0th dimension (int)\n#define NM_SHAPE0(val)          (NM_STORAGE(val)->shape[0])\n\n// Get the shape of the 1st dimension (int)\n#define NM_SHAPE1(val)          (NM_STORAGE(val)->shape[1])\n\n// Get the default value assigned to the nmatrix.\n#define NM_DEFAULT_VAL(val)     (NM_STORAGE_LIST(val)->default_val)\n\n// Number of elements in a dense nmatrix.\n#define NM_DENSE_COUNT(val)     (nm_storage_count_max_elements(NM_STORAGE_DENSE(val)))\n\n// Get a pointer to the array that stores elements in a dense matrix.\n#define NM_DENSE_ELEMENTS(val)  (NM_STORAGE_DENSE(val)->elements)\n#define NM_SIZEOF_DTYPE(val)    (DTYPE_SIZES[NM_DTYPE(val)])\n#define NM_REF(val,slice)       (RefFuncs[NM_STYPE(val)]( NM_STORAGE(val), slice, NM_SIZEOF_DTYPE(val) ))\n\n#define NM_MAX(a,b) (((a)>(b))?(a):(b))\n#define NM_MIN(a,b) (((a)>(b))?(b):(a))\n#define NM_SWAP(a,b,tmp) {(tmp)=(a);(a)=(b);(b)=(tmp);}\n\n#define NM_CHECK_ALLOC(x) if (!x) rb_raise(rb_eNoMemError, \"insufficient memory\");\n\n#define 
RB_FILE_EXISTS(fn)   (rb_funcall(rb_const_get(rb_cObject, rb_intern(\"File\")), rb_intern(\"exists?\"), 1, (fn)) == Qtrue)\n\n#define IsNMatrixType(v)  (RB_TYPE_P(v, T_DATA) && (RDATA(v)->dfree == (RUBY_DATA_FUNC)nm_delete || RDATA(v)->dfree == (RUBY_DATA_FUNC)nm_delete_ref))\n#define CheckNMatrixType(v)   if (!IsNMatrixType(v)) rb_raise(rb_eTypeError, \"expected NMatrix on left-hand side of operation\");\n\n#define NM_IsNMatrix(obj) \\\n  (rb_obj_is_kind_of(obj, cNMatrix) == Qtrue)\n\n#define NM_IsNVector(obj) \\\n  (rb_obj_is_kind_of(obj, cNVector) == Qtrue)\n\n#define RB_P(OBJ) \\\n  rb_funcall(rb_stderr, rb_intern(\"print\"), 1, rb_funcall(OBJ, rb_intern(\"object_id\"), 0)); \\\n  rb_funcall(rb_stderr, rb_intern(\"puts\"), 1, rb_funcall(OBJ, rb_intern(\"inspect\"), 0));\n\n\n#ifdef __cplusplus\ntypedef VALUE (*METHOD)(...);\n\n//}; // end of namespace nm\n#endif\n\n// In the init code below, we need to use NMATRIX for c++ and NM_NMATRIX for c\n// this macro chooses the correct one:\n#ifdef __cplusplus\n  #define _NMATRIX NMATRIX\n  #define _STORAGE STORAGE\n#else\n  #define _NMATRIX NM_NMATRIX\n  #define _STORAGE NM_STORAGE\n#endif\n\n/*\n * Functions\n */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n  void Init_nmatrix();\n  // External API\n  VALUE rb_nmatrix_dense_create(NM_DECL_ENUM(dtype_t, dtype), size_t* shape, size_t dim, void* elements, size_t length);\n  VALUE rb_nvector_dense_create(NM_DECL_ENUM(dtype_t, dtype), void* elements, size_t length);\n\n  NM_DECL_ENUM(dtype_t, nm_dtype_guess(VALUE));   // (This is a function)\n  NM_DECL_ENUM(dtype_t, nm_dtype_min(VALUE));\n\n  // Non-API functions needed by other cpp files.\n  _NMATRIX* nm_create(NM_DECL_ENUM(stype_t, stype), _STORAGE* storage);\n  _NMATRIX* nm_cast_with_ctype_args(_NMATRIX* self, NM_DECL_ENUM(stype_t, new_stype), NM_DECL_ENUM(dtype_t, new_dtype), void* init_ptr);\n  VALUE    nm_cast(VALUE self, VALUE new_stype_symbol, VALUE new_dtype_symbol, VALUE init);\n  void     nm_mark(_NMATRIX* 
mat);\n  void     nm_delete(_NMATRIX* mat);\n  void     nm_delete_ref(_NMATRIX* mat);\n  void     nm_register_values(VALUE* vals, size_t n);\n  void     nm_register_value(VALUE* val);\n  void     nm_unregister_value(VALUE* val);\n  void     nm_unregister_values(VALUE* vals, size_t n);\n  void     nm_register_storage(NM_DECL_ENUM(stype_t, stype), const _STORAGE* storage);\n  void     nm_unregister_storage(NM_DECL_ENUM(stype_t, stype), const _STORAGE* storage);\n  void     nm_register_nmatrix(_NMATRIX* nmatrix);\n  void     nm_unregister_nmatrix(_NMATRIX* nmatrix);\n  void     nm_completely_unregister_value(VALUE* val);\n\n#ifdef __cplusplus\n}\n#endif\n\n#undef _NMATRIX\n#undef _STORAGE\n\n#endif // NMATRIX_H\n"
  },
  {
    "path": "ext/nmatrix/ruby_constants.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == ruby_symbols.cpp\n//\n// Ruby symbols used throught the NMatrix project.\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n\n/*\n * Global Variables\n */\n\nID  nm_rb_dtype,\n    nm_rb_stype,\n\n    nm_rb_capacity,\n    nm_rb_default,\n\n    nm_rb_real,\n    nm_rb_imag,\n\n    nm_rb_numer,\n    nm_rb_denom,\n\n    nm_rb_complex_conjugate,\n    nm_rb_transpose,\n    nm_rb_no_transpose,\n    nm_rb_left,\n    nm_rb_right,\n    nm_rb_upper,\n    nm_rb_lower,\n    nm_rb_unit,\n    nm_rb_nonunit,\n\n    nm_rb_dense,\n    nm_rb_list,\n    nm_rb_yale,\n\n    nm_rb_row,\n    nm_rb_column,\n    nm_rb_add,\n    nm_rb_sub,\n    nm_rb_mul,\n    nm_rb_div,\n    nm_rb_both,\n    nm_rb_none,\n\n    nm_rb_negate,\n\n    nm_rb_percent,\n    nm_rb_gt,\n    nm_rb_lt,\n    nm_rb_eql,\n    nm_rb_neql,\n    nm_rb_gte,\n    nm_rb_lte,\n\n    nm_rb_hash;\n\nVALUE cNMatrix,\n      cNMatrix_IO,\n      cNMatrix_IO_Matlab,\n      cNMatrix_YaleFunctions,\n\n      cNMatrix_GC_holder,\n\n      nm_eDataTypeError,\n      nm_eConvergenceError,\n      nm_eStorageTypeError,\n      nm_eShapeError,\n      nm_eNotInvertibleError;\n\n/*\n * Functions\n */\n\nvoid nm_init_ruby_constants(void) {\n  nm_rb_dtype             = 
rb_intern(\"dtype\");\n  nm_rb_stype             = rb_intern(\"stype\");\n\n  nm_rb_capacity          = rb_intern(\"capacity\");\n  nm_rb_default           = rb_intern(\"default\");\n\n  nm_rb_real              = rb_intern(\"real\");\n  nm_rb_imag              = rb_intern(\"imag\");\n\n  nm_rb_numer              = rb_intern(\"numerator\");\n  nm_rb_denom              = rb_intern(\"denominator\");\n\n  nm_rb_complex_conjugate  = rb_intern(\"complex_conjugate\");\n  nm_rb_transpose          = rb_intern(\"transpose\");\n  nm_rb_no_transpose      = rb_intern(\"no_transpose\");\n\n  nm_rb_dense             = rb_intern(\"dense\");\n  nm_rb_list              = rb_intern(\"list\");\n  nm_rb_yale              = rb_intern(\"yale\");\n\n  nm_rb_add                = rb_intern(\"+\");\n  nm_rb_sub                = rb_intern(\"-\");\n  nm_rb_mul                = rb_intern(\"*\");\n  nm_rb_div                = rb_intern(\"/\");\n\n  nm_rb_negate            = rb_intern(\"-@\");\n\n  nm_rb_percent            = rb_intern(\"%\");\n  nm_rb_gt                = rb_intern(\">\");\n  nm_rb_lt                = rb_intern(\"<\");\n  nm_rb_eql                = rb_intern(\"==\");\n  nm_rb_neql              = rb_intern(\"!=\");\n  nm_rb_gte                = rb_intern(\">=\");\n  nm_rb_lte                = rb_intern(\"<=\");\n\n  nm_rb_left              = rb_intern(\"left\");\n  nm_rb_right             = rb_intern(\"right\");\n  nm_rb_upper             = rb_intern(\"upper\");\n  nm_rb_lower             = rb_intern(\"lower\");\n  nm_rb_unit              = rb_intern(\"unit\");\n  nm_rb_nonunit           = rb_intern(\"nonunit\");\n  nm_rb_hash              = rb_intern(\"hash\");\n\n  nm_rb_column            = rb_intern(\"column\");\n  nm_rb_row               = rb_intern(\"row\");\n\n  nm_rb_both              = rb_intern(\"both\");\n  nm_rb_none              = rb_intern(\"none\");\n}\n"
  },
  {
    "path": "ext/nmatrix/ruby_constants.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == data.h\n//\n// Header file for dealing with data types.\n\n#ifndef RUBY_CONSTANTS_H\n#define RUBY_CONSTANTS_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n\n/*\n * Data\n */\n\nextern ID nm_rb_dtype,\n          nm_rb_stype,\n\n          nm_rb_capacity,\n          nm_rb_default,\n\n          nm_rb_real,\n          nm_rb_imag,\n\n          nm_rb_numer,\n          nm_rb_denom,\n\n          nm_rb_complex_conjugate,\n          nm_rb_transpose,\n          nm_rb_no_transpose,\n          nm_rb_left,\n          nm_rb_right,\n          nm_rb_upper,\n          nm_rb_lower,\n          nm_rb_unit,\n          nm_rb_nonunit,\n\n          nm_rb_dense,\n          nm_rb_list,\n          nm_rb_yale,\n\n          nm_rb_row,\n          nm_rb_column,\n\n          nm_rb_add,\n          nm_rb_sub,\n          nm_rb_mul,\n          nm_rb_div,\n\n          nm_rb_negate,\n\n          nm_rb_percent,\n          nm_rb_gt,\n          nm_rb_lt,\n          nm_rb_eql,\n          nm_rb_neql,\n          nm_rb_gte,\n          nm_rb_lte,\n\n          nm_rb_hash;\n\nextern VALUE  cNMatrix,\n              cNMatrix_IO,\n              cNMatrix_IO_Matlab,\n              cNMatrix_YaleFunctions,\n\n              
cNMatrix_GC_holder,\n\n              nm_eDataTypeError,\n              nm_eConvergenceError,\n              nm_eStorageTypeError,\n              nm_eShapeError,\n              nm_eNotInvertibleError;\n\n/*\n * Functions\n */\n\nvoid nm_init_ruby_constants(void);\n\n#endif // RUBY_CONSTANTS_H\n"
  },
  {
    "path": "ext/nmatrix/ruby_nmatrix.c",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == ruby_nmatrix.c\n//\n// Ruby-facing NMatrix C functions. Not compiled directly -- included\n// into nmatrix.cpp.\n//\n\n/*\n * Forward Declarations\n */\n\nstatic VALUE nm_init(int argc, VALUE* argv, VALUE nm);\nstatic VALUE nm_init_copy(VALUE copy, VALUE original);\nstatic VALUE nm_init_transposed(VALUE self);\nstatic VALUE nm_read(int argc, VALUE* argv, VALUE self);\nstatic VALUE nm_write(int argc, VALUE* argv, VALUE self);\nstatic VALUE nm_init_yale_from_old_yale(VALUE shape, VALUE dtype, VALUE ia, VALUE ja, VALUE a, VALUE from_dtype, VALUE nm);\nstatic VALUE nm_alloc(VALUE klass);\nstatic VALUE nm_dtype(VALUE self);\nstatic VALUE nm_stype(VALUE self);\nstatic VALUE nm_default_value(VALUE self);\nstatic size_t effective_dim(STORAGE* s);\nstatic VALUE nm_effective_dim(VALUE self);\nstatic VALUE nm_dim(VALUE self);\nstatic VALUE nm_offset(VALUE self);\nstatic VALUE nm_shape(VALUE self);\nstatic VALUE nm_supershape(VALUE self);\nstatic VALUE nm_capacity(VALUE self);\nstatic VALUE nm_each_with_indices(VALUE nmatrix);\nstatic VALUE nm_each_stored_with_indices(VALUE nmatrix);\nstatic VALUE nm_each_ordered_stored_with_indices(VALUE nmatrix);\nstatic VALUE nm_map_stored(VALUE nmatrix);\n\nstatic void 
init_slice_no_alloc(SLICE* slice, size_t dim, int argc, VALUE* arg, size_t* shape);\nstatic VALUE nm_xslice(int argc, VALUE* argv, void* (*slice_func)(const STORAGE*, SLICE*), void (*delete_func)(NMATRIX*), VALUE self);\nstatic VALUE nm_mset(int argc, VALUE* argv, VALUE self);\nstatic VALUE nm_mget(int argc, VALUE* argv, VALUE self);\nstatic VALUE nm_mref(int argc, VALUE* argv, VALUE self);\nstatic VALUE nm_is_ref(VALUE self);\n\nstatic VALUE is_symmetric(VALUE self, bool hermitian);\n\nstatic VALUE nm_guess_dtype(VALUE self, VALUE v);\nstatic VALUE nm_min_dtype(VALUE self, VALUE v);\n\nstatic VALUE nm_data_pointer(VALUE self);\n\n/*\n * Macro defines an element-wise accessor function for some operation.\n *\n * This is only responsible for the Ruby accessor! You still have to write the actual functions, obviously.\n */\n#define DEF_ELEMENTWISE_RUBY_ACCESSOR(oper, name)                 \\\nstatic VALUE nm_ew_##name(VALUE left_val, VALUE right_val) {  \\\n  return elementwise_op(nm::EW_##oper, left_val, right_val);  \\\n}\n\n#define DEF_UNARY_RUBY_ACCESSOR(oper, name)                 \\\nstatic VALUE nm_unary_##name(VALUE self) {  \\\n  return unary_op(nm::UNARY_##oper, self);  \\\n}\n\n#define DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(oper, name) \\\nstatic VALUE nm_noncom_ew_##name(int argc, VALUE* argv, VALUE self) { \\\n  if (argc > 1) { \\\n    return noncom_elementwise_op(nm::NONCOM_EW_##oper, self, argv[0], argv[1]); \\\n  } else { \\\n    return noncom_elementwise_op(nm::NONCOM_EW_##oper, self, argv[0], Qfalse); \\\n  } \\\n}\n\n\n/*\n * Macro declares a corresponding accessor function prototype for some element-wise operation.\n */\n#define DECL_ELEMENTWISE_RUBY_ACCESSOR(name)    static VALUE nm_ew_##name(VALUE left_val, VALUE right_val);\n#define DECL_UNARY_RUBY_ACCESSOR(name)          static VALUE nm_unary_##name(VALUE self);\n#define DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(name)    static VALUE nm_noncom_ew_##name(int argc, VALUE* argv, VALUE 
self);\n\nDECL_ELEMENTWISE_RUBY_ACCESSOR(add)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(subtract)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(multiply)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(divide)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(power)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(mod)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(eqeq)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(neq)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(lt)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(gt)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(leq)\nDECL_ELEMENTWISE_RUBY_ACCESSOR(geq)\nDECL_UNARY_RUBY_ACCESSOR(sin)\nDECL_UNARY_RUBY_ACCESSOR(cos)\nDECL_UNARY_RUBY_ACCESSOR(tan)\nDECL_UNARY_RUBY_ACCESSOR(asin)\nDECL_UNARY_RUBY_ACCESSOR(acos)\nDECL_UNARY_RUBY_ACCESSOR(atan)\nDECL_UNARY_RUBY_ACCESSOR(sinh)\nDECL_UNARY_RUBY_ACCESSOR(cosh)\nDECL_UNARY_RUBY_ACCESSOR(tanh)\nDECL_UNARY_RUBY_ACCESSOR(asinh)\nDECL_UNARY_RUBY_ACCESSOR(acosh)\nDECL_UNARY_RUBY_ACCESSOR(atanh)\nDECL_UNARY_RUBY_ACCESSOR(exp)\nDECL_UNARY_RUBY_ACCESSOR(log2)\nDECL_UNARY_RUBY_ACCESSOR(log10)\nDECL_UNARY_RUBY_ACCESSOR(sqrt)\nDECL_UNARY_RUBY_ACCESSOR(erf)\nDECL_UNARY_RUBY_ACCESSOR(erfc)\nDECL_UNARY_RUBY_ACCESSOR(cbrt)\nDECL_UNARY_RUBY_ACCESSOR(gamma)\nDECL_UNARY_RUBY_ACCESSOR(negate)\nDECL_UNARY_RUBY_ACCESSOR(floor)\nDECL_UNARY_RUBY_ACCESSOR(ceil)\nDECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(atan2)\nDECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(ldexp)\nDECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(hypot)\n\n//log/round can be unary, but also take a base argument, as with Math.log\nstatic VALUE nm_unary_log(int argc, VALUE* argv, VALUE self);\nstatic VALUE nm_unary_round(int argc, VALUE* argv, VALUE self);\n\nstatic VALUE elementwise_op(nm::ewop_t op, VALUE left_val, VALUE right_val);\nstatic VALUE unary_op(nm::unaryop_t op, VALUE self);\nstatic VALUE noncom_elementwise_op(nm::noncom_ewop_t op, VALUE self, VALUE other, VALUE orderflip);\n\nstatic VALUE nm_symmetric(VALUE self);\nstatic VALUE nm_hermitian(VALUE self);\n\nstatic VALUE nm_eqeq(VALUE left, VALUE right);\n\nstatic VALUE matrix_multiply_scalar(NMATRIX* left, VALUE scalar);\nstatic VALUE 
matrix_multiply(NMATRIX* left, NMATRIX* right);\nstatic VALUE nm_multiply(VALUE left_v, VALUE right_v);\nstatic VALUE nm_det_exact(VALUE self);\nstatic VALUE nm_hessenberg(VALUE self, VALUE a);\nstatic VALUE nm_inverse(VALUE self, VALUE inverse, VALUE bang);\nstatic VALUE nm_inverse_exact(VALUE self, VALUE inverse, VALUE lda, VALUE ldb);\nstatic VALUE nm_complex_conjugate_bang(VALUE self);\nstatic VALUE nm_reshape_bang(VALUE self, VALUE arg);\n\nstatic nm::dtype_t  interpret_dtype(int argc, VALUE* argv, nm::stype_t stype);\nstatic void*    interpret_initial_value(VALUE arg, nm::dtype_t dtype);\nstatic size_t*  interpret_shape(VALUE arg, size_t* dim);\nstatic nm::stype_t  interpret_stype(VALUE arg);\n\n/* Singleton methods */\nstatic VALUE nm_upcast(VALUE self, VALUE t1, VALUE t2);\n\n\n#ifdef BENCHMARK\nstatic double get_time(void);\n#endif\n\n///////////////////\n// Ruby Bindings //\n///////////////////\n\nvoid Init_nmatrix() {\n\n\n  ///////////////////////\n  // Class Definitions //\n  ///////////////////////\n\n  cNMatrix = rb_define_class(\"NMatrix\", rb_cObject);\n\n  // Special exceptions\n\n  /*\n   * Exception raised when there's a problem with data.\n   */\n  nm_eDataTypeError    = rb_define_class(\"DataTypeError\", rb_eStandardError);\n\n  /*\n   * Exception raised when something goes wrong with the storage of a matrix.\n   */\n  nm_eStorageTypeError = rb_define_class(\"StorageTypeError\", rb_eStandardError);\n\n  /*\n   * Exception raised when the matrix shape is not appropriate for a given operation.\n   */\n  nm_eShapeError = rb_define_class(\"ShapeError\", rb_eStandardError);\n\n  /*\n   * Exception raised when an inverse is requested but the matrix is not invertible.\n   */\n  nm_eNotInvertibleError = rb_define_class(\"NotInvertibleError\", rb_eStandardError);\n\n  /*\n   * :nodoc:\n   * Class that holds values in use by the C code.\n   */\n  cNMatrix_GC_holder = rb_define_class(\"NMGCHolder\", rb_cObject);\n\n\n  ///////////////////\n  // Class 
Methods //\n  ///////////////////\n\n  rb_define_alloc_func(cNMatrix, nm_alloc);\n\n  ///////////////////////\n  // Singleton Methods //\n  ///////////////////////\n\n  rb_define_singleton_method(cNMatrix, \"upcast\", (METHOD)nm_upcast, 2); /* in ext/nmatrix/nmatrix.cpp */\n  rb_define_singleton_method(cNMatrix, \"guess_dtype\", (METHOD)nm_guess_dtype, 1);\n  rb_define_singleton_method(cNMatrix, \"min_dtype\", (METHOD)nm_min_dtype, 1);\n\n  //////////////////////\n  // Instance Methods //\n  //////////////////////\n\n  rb_define_method(cNMatrix, \"initialize\", (METHOD)nm_init, -1);\n  rb_define_method(cNMatrix, \"initialize_copy\", (METHOD)nm_init_copy, 1);\n  rb_define_singleton_method(cNMatrix, \"read\", (METHOD)nm_read, -1);\n\n  rb_define_method(cNMatrix, \"write\", (METHOD)nm_write, -1);\n\n  // Technically, the following function is a copy constructor.\n  rb_define_protected_method(cNMatrix, \"clone_transpose\", (METHOD)nm_init_transposed, 0);\n\n  rb_define_method(cNMatrix, \"dtype\", (METHOD)nm_dtype, 0);\n  rb_define_method(cNMatrix, \"stype\", (METHOD)nm_stype, 0);\n  rb_define_method(cNMatrix, \"cast_full\",  (METHOD)nm_cast, 3);\n  rb_define_method(cNMatrix, \"default_value\", (METHOD)nm_default_value, 0);\n  rb_define_protected_method(cNMatrix, \"__list_default_value__\", (METHOD)nm_list_default_value, 0);\n  rb_define_protected_method(cNMatrix, \"__yale_default_value__\", (METHOD)nm_yale_default_value, 0);\n\n  rb_define_method(cNMatrix, \"[]\", (METHOD)nm_mref, -1);\n  rb_define_method(cNMatrix, \"slice\", (METHOD)nm_mget, -1);\n  rb_define_method(cNMatrix, \"[]=\", (METHOD)nm_mset, -1);\n  rb_define_method(cNMatrix, \"is_ref?\", (METHOD)nm_is_ref, 0);\n  rb_define_method(cNMatrix, \"dimensions\", (METHOD)nm_dim, 0);\n  rb_define_method(cNMatrix, \"effective_dimensions\", (METHOD)nm_effective_dim, 0);\n\n  rb_define_protected_method(cNMatrix, \"__list_to_hash__\", (METHOD)nm_to_hash, 0); // handles list and dense, which are n-dimensional\n\n  
rb_define_method(cNMatrix, \"shape\", (METHOD)nm_shape, 0);\n  rb_define_method(cNMatrix, \"supershape\", (METHOD)nm_supershape, 0);\n  rb_define_method(cNMatrix, \"offset\", (METHOD)nm_offset, 0);\n  rb_define_method(cNMatrix, \"det_exact\", (METHOD)nm_det_exact, 0);\n  rb_define_method(cNMatrix, \"complex_conjugate!\", (METHOD)nm_complex_conjugate_bang, 0);\n\n  rb_define_protected_method(cNMatrix, \"reshape_bang\", (METHOD)nm_reshape_bang, 1);\n\n  // Iterators public methods\n  rb_define_method(cNMatrix, \"each_with_indices\", (METHOD)nm_each_with_indices, 0);\n  rb_define_method(cNMatrix, \"each_stored_with_indices\", (METHOD)nm_each_stored_with_indices, 0);\n  rb_define_method(cNMatrix, \"map_stored\", (METHOD)nm_map_stored, 0);\n  rb_define_method(cNMatrix, \"each_ordered_stored_with_indices\", (METHOD)nm_each_ordered_stored_with_indices, 0);\n\n  // Iterators protected methods\n  rb_define_protected_method(cNMatrix, \"__dense_each__\", (METHOD)nm_dense_each, 0);\n  rb_define_protected_method(cNMatrix, \"__dense_map__\", (METHOD)nm_dense_map, 0);\n  rb_define_protected_method(cNMatrix, \"__dense_map_pair__\", (METHOD)nm_dense_map_pair, 1);\n  rb_define_protected_method(cNMatrix, \"__list_map_merged_stored__\", (METHOD)nm_list_map_merged_stored, 2);\n  rb_define_protected_method(cNMatrix, \"__list_map_stored__\", (METHOD)nm_list_map_stored, 1);\n  rb_define_protected_method(cNMatrix, \"__yale_map_merged_stored__\", (METHOD)nm_yale_map_merged_stored, 2);\n  rb_define_protected_method(cNMatrix, \"__yale_map_stored__\", (METHOD)nm_yale_map_stored, 0);\n  rb_define_protected_method(cNMatrix, \"__yale_stored_diagonal_each_with_indices__\", (METHOD)nm_yale_stored_diagonal_each_with_indices, 0);\n  rb_define_protected_method(cNMatrix, \"__yale_stored_nondiagonal_each_with_indices__\", (METHOD)nm_yale_stored_nondiagonal_each_with_indices, 0);\n\n  rb_define_method(cNMatrix, \"==\",    (METHOD)nm_eqeq,        1);\n\n  rb_define_method(cNMatrix, \"+\",      
(METHOD)nm_ew_add,      1);\n  rb_define_method(cNMatrix, \"-\",      (METHOD)nm_ew_subtract,  1);\n  rb_define_method(cNMatrix, \"*\",      (METHOD)nm_ew_multiply,  1);\n  rb_define_method(cNMatrix, \"/\",      (METHOD)nm_ew_divide,    1);\n  rb_define_method(cNMatrix, \"**\",    (METHOD)nm_ew_power,    1);\n  rb_define_method(cNMatrix, \"%\",     (METHOD)nm_ew_mod,      1);\n\n  rb_define_method(cNMatrix, \"atan2\", (METHOD)nm_noncom_ew_atan2, -1);\n  rb_define_method(cNMatrix, \"ldexp\", (METHOD)nm_noncom_ew_ldexp, -1);\n  rb_define_method(cNMatrix, \"hypot\", (METHOD)nm_noncom_ew_hypot, -1);\n\n  rb_define_method(cNMatrix, \"sin\",   (METHOD)nm_unary_sin,   0);\n  rb_define_method(cNMatrix, \"cos\",   (METHOD)nm_unary_cos,   0);\n  rb_define_method(cNMatrix, \"tan\",   (METHOD)nm_unary_tan,   0);\n  rb_define_method(cNMatrix, \"asin\",  (METHOD)nm_unary_asin,  0);\n  rb_define_method(cNMatrix, \"acos\",  (METHOD)nm_unary_acos,  0);\n  rb_define_method(cNMatrix, \"atan\",  (METHOD)nm_unary_atan,  0);\n  rb_define_method(cNMatrix, \"sinh\",  (METHOD)nm_unary_sinh,  0);\n  rb_define_method(cNMatrix, \"cosh\",  (METHOD)nm_unary_cosh,  0);\n  rb_define_method(cNMatrix, \"tanh\",  (METHOD)nm_unary_tanh,  0);\n  rb_define_method(cNMatrix, \"asinh\", (METHOD)nm_unary_asinh, 0);\n  rb_define_method(cNMatrix, \"acosh\", (METHOD)nm_unary_acosh, 0);\n  rb_define_method(cNMatrix, \"atanh\", (METHOD)nm_unary_atanh, 0);\n  rb_define_method(cNMatrix, \"exp\",   (METHOD)nm_unary_exp,   0);\n  rb_define_method(cNMatrix, \"log2\",  (METHOD)nm_unary_log2,  0);\n  rb_define_method(cNMatrix, \"log10\", (METHOD)nm_unary_log10, 0);\n  rb_define_method(cNMatrix, \"sqrt\",  (METHOD)nm_unary_sqrt,  0);\n  rb_define_method(cNMatrix, \"erf\",   (METHOD)nm_unary_erf,   0);\n  rb_define_method(cNMatrix, \"erfc\",  (METHOD)nm_unary_erfc,  0);\n  rb_define_method(cNMatrix, \"cbrt\",  (METHOD)nm_unary_cbrt,  0);\n  rb_define_method(cNMatrix, \"gamma\", (METHOD)nm_unary_gamma, 0);\n  
rb_define_method(cNMatrix, \"log\",   (METHOD)nm_unary_log,  -1);\n  rb_define_method(cNMatrix, \"-@\",    (METHOD)nm_unary_negate,0);\n  rb_define_method(cNMatrix, \"floor\", (METHOD)nm_unary_floor, 0);\n  rb_define_method(cNMatrix, \"ceil\", (METHOD)nm_unary_ceil, 0);\n  rb_define_method(cNMatrix, \"round\", (METHOD)nm_unary_round, -1);\n\n\n  rb_define_method(cNMatrix, \"=~\", (METHOD)nm_ew_eqeq, 1);\n  rb_define_method(cNMatrix, \"!~\", (METHOD)nm_ew_neq, 1);\n  rb_define_method(cNMatrix, \"<=\", (METHOD)nm_ew_leq, 1);\n  rb_define_method(cNMatrix, \">=\", (METHOD)nm_ew_geq, 1);\n  rb_define_method(cNMatrix, \"<\", (METHOD)nm_ew_lt, 1);\n  rb_define_method(cNMatrix, \">\", (METHOD)nm_ew_gt, 1);\n\n  /////////////////////////////\n  // Helper Instance Methods //\n  /////////////////////////////\n  rb_define_protected_method(cNMatrix, \"__yale_vector_set__\", (METHOD)nm_vector_set, -1);\n\n  /////////////////////////\n  // Matrix Math Methods //\n  /////////////////////////\n  rb_define_method(cNMatrix, \"dot\", (METHOD)nm_multiply, 1);\n  rb_define_method(cNMatrix, \"symmetric?\", (METHOD)nm_symmetric, 0);\n  rb_define_method(cNMatrix, \"hermitian?\", (METHOD)nm_hermitian, 0);\n  rb_define_method(cNMatrix, \"capacity\", (METHOD)nm_capacity, 0);\n\n  // protected methods\n  rb_define_protected_method(cNMatrix, \"__inverse__\", (METHOD)nm_inverse, 2);\n  rb_define_protected_method(cNMatrix, \"__inverse_exact__\", (METHOD)nm_inverse_exact, 3);\n\n  // private methods\n  rb_define_private_method(cNMatrix, \"__hessenberg__\", (METHOD)nm_hessenberg, 1);\n\n  /////////////////\n  // FFI Methods //\n  /////////////////\n  rb_define_method(cNMatrix, \"data_pointer\", (METHOD)nm_data_pointer, 0);\n\n  /////////////\n  // Aliases //\n  /////////////\n\n  rb_define_alias(cNMatrix, \"dim\", \"dimensions\");\n  rb_define_alias(cNMatrix, \"effective_dim\", \"effective_dimensions\");\n  rb_define_alias(cNMatrix, \"equal?\", \"eql?\");\n\n  ////////////\n  //Epsilons//\n  
////////////\n  rb_define_const(cNMatrix, \"FLOAT64_EPSILON\", rb_const_get(rb_cFloat, rb_intern(\"EPSILON\")));\n  rb_define_const(cNMatrix, \"FLOAT32_EPSILON\", DBL2NUM(FLT_EPSILON));\n\n  ///////////////////////\n  // Symbol Generation //\n  ///////////////////////\n\n  nm_init_ruby_constants();\n\n  //////////////////////////\n  // YaleFunctions module //\n  //////////////////////////\n\n  nm_init_yale_functions();\n\n  /////////////////\n  // BLAS module //\n  /////////////////\n\n  nm_math_init_blas();\n\n  ///////////////\n  // IO module //\n  ///////////////\n  nm_init_io();\n\n  /////////////////////////////////////////////////\n  // Force compilation of necessary constructors //\n  /////////////////////////////////////////////////\n  nm_init_data();\n}\n\n\n//////////////////\n// Ruby Methods //\n//////////////////\n\n/*\n * Allocator.\n */\nstatic VALUE nm_alloc(VALUE klass) {\n  NMATRIX* mat = NM_ALLOC(NMATRIX);\n  mat->storage = NULL;\n\n  // DO NOT MARK This STRUCT. It has no storage allocated, and no stype, so mark will do an invalid something.\n  return Data_Wrap_Struct(klass, NULL, nm_delete, mat);\n}\n\n/*\n * Find the capacity of an NMatrix. The capacity only differs from the size for\n * Yale matrices, which occasionally allocate more space than they need. For\n * list and dense, capacity gives the number of elements in the matrix.\n *\n * If you call this on a slice, it may behave unpredictably. 
Most likely it'll\n * just return the original matrix's capacity.\n */\nstatic VALUE nm_capacity(VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n  VALUE cap;\n\n  switch(NM_STYPE(self)) {\n  case nm::YALE_STORE:\n    cap = UINT2NUM(reinterpret_cast<YALE_STORAGE*>(NM_STORAGE_YALE(self)->src)->capacity);\n    break;\n\n  case nm::DENSE_STORE:\n    cap = UINT2NUM(nm_storage_count_max_elements( NM_STORAGE_DENSE(self) ));\n    break;\n\n  case nm::LIST_STORE:\n    cap = UINT2NUM(nm_list_storage_count_elements( NM_STORAGE_LIST(self) ));\n    break;\n\n  default:\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    rb_raise(nm_eStorageTypeError, \"unrecognized stype in nm_capacity()\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  return cap;\n}\n\n\n/*\n * Mark function.\n */\nvoid nm_mark(NMATRIX* mat) {\n  STYPE_MARK_TABLE(mark)\n  mark[mat->stype](mat->storage);\n}\n\n\n/*\n * Destructor.\n */\nvoid nm_delete(NMATRIX* mat) {\n  static void (*ttable[nm::NUM_STYPES])(STORAGE*) = {\n    nm_dense_storage_delete,\n    nm_list_storage_delete,\n    nm_yale_storage_delete\n  };\n  ttable[mat->stype](mat->storage);\n\n  NM_FREE(mat);\n}\n\n/*\n * Slicing destructor.\n */\nvoid nm_delete_ref(NMATRIX* mat) {\n  static void (*ttable[nm::NUM_STYPES])(STORAGE*) = {\n    nm_dense_storage_delete_ref,\n    nm_list_storage_delete_ref,\n    nm_yale_storage_delete_ref\n  };\n  ttable[mat->stype](mat->storage);\n\n  NM_FREE(mat);\n}\n\n\n/**\n * These variables hold a linked list of VALUEs that are registered to be in\n * use by nmatrix so that they can be marked when GC runs.\n */\nstatic VALUE* gc_value_holder = NULL;\nstatic NM_GC_HOLDER* gc_value_holder_struct = NULL;\nstatic NM_GC_HOLDER* allocated_pool = NULL; // an object pool for linked list nodes; using pooling is in some cases a substantial performance improvement\n\n/**\n * GC Marking function for the values that have been registered.\n */\nstatic void __nm_mark_value_container(NM_GC_HOLDER* 
gc_value_holder_struct) {\n  if (gc_value_holder_struct && gc_value_holder_struct->start) {\n    NM_GC_LL_NODE* curr = gc_value_holder_struct->start;\n    while (curr) {\n      rb_gc_mark_locations(curr->val, curr->val + curr->n);\n      curr = curr->next;\n    }\n  }\n}\n\n/**\n * Initilalizes the linked list of in-use VALUEs if it hasn't been done\n * already.\n */\nstatic void __nm_initialize_value_container() {\n  if (gc_value_holder == NULL) {\n    gc_value_holder_struct = NM_ALLOC_NONRUBY(NM_GC_HOLDER);\n    allocated_pool = NM_ALLOC_NONRUBY(NM_GC_HOLDER);\n    gc_value_holder = NM_ALLOC_NONRUBY(VALUE);\n    gc_value_holder_struct->start = NULL;\n    allocated_pool->start = NULL;\n    *gc_value_holder = Data_Wrap_Struct(cNMatrix_GC_holder, __nm_mark_value_container, NULL, gc_value_holder_struct);\n    rb_global_variable(gc_value_holder);\n  }\n}\n\n/*\n * Register an array of VALUEs to avoid their collection\n * while using them internally.\n */\nvoid nm_register_values(VALUE* values, size_t n) {\n  if (!gc_value_holder_struct)\n    __nm_initialize_value_container();\n  if (values) {\n    NM_GC_LL_NODE* to_insert = NULL;\n    if (allocated_pool->start) {\n      to_insert = allocated_pool->start;\n      allocated_pool->start = to_insert->next;\n    } else {\n      to_insert = NM_ALLOC_NONRUBY(NM_GC_LL_NODE);\n    }\n    to_insert->val = values;\n    to_insert->n = n;\n    to_insert->next = gc_value_holder_struct->start;\n    gc_value_holder_struct->start = to_insert;\n  }\n}\n\n/*\n * Unregister an array of VALUEs with the gc to allow normal\n * garbage collection to occur again.\n */\nvoid nm_unregister_values(VALUE* values, size_t n) {\n  if (values) {\n    if (gc_value_holder_struct) {\n      NM_GC_LL_NODE* curr = gc_value_holder_struct->start;\n      NM_GC_LL_NODE* last = NULL;\n      while (curr) {\n        if (curr->val == values) {\n          if (last) {\n            last->next = curr->next;\n          } else {\n            gc_value_holder_struct->start 
= curr->next;\n          }\n          curr->next = allocated_pool->start;\n          curr->val = NULL;\n          curr->n = 0;\n          allocated_pool->start = curr;\n          break;\n        }\n        last = curr;\n        curr = curr->next;\n      }\n    }\n  }\n}\n\n\n/**\n * Register a single VALUE as in use to avoid garbage collection.\n */\nvoid nm_register_value(VALUE* val) {\n  nm_register_values(val, 1);\n}\n\n/**\n * Unregister a single VALUE to allow normal garbage collection.\n */\nvoid nm_unregister_value(VALUE* val) {\n  nm_unregister_values(val, 1);\n}\n\n/**\n * Removes all instances of a single VALUE in the gc list.  This can be\n * dangerous.  Primarily used when something is about to be\n * freed and replaced so that and residual registrations won't access after\n * free.\n **/\nvoid nm_completely_unregister_value(VALUE* val) {\n  if (gc_value_holder_struct) {\n    NM_GC_LL_NODE* curr = gc_value_holder_struct->start;\n    NM_GC_LL_NODE* last = NULL;\n    while (curr) {\n      if (curr->val == val) {\n        if (last) {\n          last->next = curr->next;\n        } else {\n          gc_value_holder_struct->start = curr->next;\n        }\n        NM_GC_LL_NODE* temp_next = curr->next;\n        curr->next = allocated_pool->start;\n        curr->val = NULL;\n        curr->n = 0;\n        allocated_pool->start = curr;\n        curr = temp_next;\n      } else {\n        last = curr;\n        curr = curr->next;\n      }\n    }\n  }\n}\n\n\n\n/**\n * Register a STORAGE struct of the supplied stype to avoid garbage collection\n * of its internals.\n *\n * Delegates to the storage-specific methods.  
They will check dtype and ignore\n * non-rubyobject dtypes, so it's safe to pass any storage in.\n */\nvoid nm_register_storage(nm::stype_t stype, const STORAGE* storage) {\n  STYPE_REGISTER_TABLE(ttable);\n  ttable[stype](storage);\n}\n\n/**\n * Unregister a STORAGE struct of the supplied stype to allow normal garbage collection\n * of its internals.\n *\n * Delegates to the storage-specific methods.  They will check dtype and ignore\n * non-rubyobject dtypes, so it's safe to pass any storage in.\n *\n */\nvoid nm_unregister_storage(nm::stype_t stype, const STORAGE* storage) {\n  STYPE_UNREGISTER_TABLE(ttable);\n  ttable[stype](storage);\n}\n\n/**\n * Registers an NMATRIX struct to avoid garbage collection of its internals.\n */\nvoid nm_register_nmatrix(NMATRIX* nmatrix) {\n  if (nmatrix)\n    nm_register_storage(nmatrix->stype, nmatrix->storage);\n}\n\n/**\n * Unregisters an NMATRIX struct to avoid garbage collection of its internals.\n */\nvoid nm_unregister_nmatrix(NMATRIX* nmatrix) {\n  if (nmatrix)\n    nm_unregister_storage(nmatrix->stype, nmatrix->storage);\n}\n\n/*\n * call-seq:\n *     dtype -> Symbol\n *\n * Get the data type (dtype) of a matrix, e.g., :byte, :int8, :int16, :int32,\n * :int64, :float32, :float64, :complex64, :complex128,\n * or :object (the last is a Ruby object).\n */\nstatic VALUE nm_dtype(VALUE self) {\n  ID dtype = rb_intern(DTYPE_NAMES[NM_DTYPE(self)]);\n  return ID2SYM(dtype);\n}\n\n\n/*\n * call-seq:\n *     upcast(first_dtype, second_dtype) -> Symbol\n *\n * Given a binary operation between types t1 and t2, what type will be returned?\n *\n * This is a singleton method on NMatrix, e.g., NMatrix.upcast(:int32, :int64)\n */\nstatic VALUE nm_upcast(VALUE self, VALUE t1, VALUE t2) {\n  nm::dtype_t d1    = nm_dtype_from_rbsymbol(t1),\n              d2    = nm_dtype_from_rbsymbol(t2);\n\n  return ID2SYM(rb_intern( DTYPE_NAMES[ Upcast[d1][d2] ] ));\n}\n\n\n/*\n * call-seq:\n       default_value -> ...\n *\n * Get the default value for 
the matrix. For dense, this is undefined and will return Qnil. For list, it is user-defined.\n * For yale, it's going to be some variation on zero, but may be Qfalse or Qnil.\n */\nstatic VALUE nm_default_value(VALUE self) {\n  switch(NM_STYPE(self)) {\n  case nm::YALE_STORE:\n    return nm_yale_default_value(self);\n  case nm::LIST_STORE:\n    return nm_list_default_value(self);\n  case nm::DENSE_STORE:\n  default:\n    return Qnil;\n  }\n}\n\n\n/*\n * call-seq:\n *     each_with_indices -> Enumerator\n *\n * Iterate over all entries of any matrix in standard storage order (as with #each), and include the indices.\n */\nstatic VALUE nm_each_with_indices(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  VALUE to_return = Qnil;\n\n  switch(NM_STYPE(nmatrix)) {\n  case nm::YALE_STORE:\n    to_return = nm_yale_each_with_indices(nmatrix);\n    break;\n  case nm::DENSE_STORE:\n    to_return = nm_dense_each_with_indices(nmatrix);\n    break;\n  case nm::LIST_STORE:\n    to_return = nm_list_each_with_indices(nmatrix, false);\n    break;\n  default:\n    NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n    rb_raise(nm_eDataTypeError, \"Not a proper storage type\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return to_return;\n}\n\n/*\n * call-seq:\n *     each_stored_with_indices -> Enumerator\n *\n * Iterate over the stored entries of any matrix. For dense and yale, this iterates over non-zero\n * entries; for list, this iterates over non-default entries. 
Yields dim+1 values for each entry:\n * i, j, ..., and the entry itself.\n */\nstatic VALUE nm_each_stored_with_indices(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  VALUE to_return = Qnil;\n\n  switch(NM_STYPE(nmatrix)) {\n  case nm::YALE_STORE:\n    to_return = nm_yale_each_stored_with_indices(nmatrix);\n    break;\n  case nm::DENSE_STORE:\n    to_return = nm_dense_each_with_indices(nmatrix);\n    break;\n  case nm::LIST_STORE:\n    to_return = nm_list_each_with_indices(nmatrix, true);\n    break;\n  default:\n    NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n    rb_raise(nm_eDataTypeError, \"Not a proper storage type\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return to_return;\n}\n\n\n/*\n * call-seq:\n *     map_stored -> Enumerator\n *\n * Iterate over the stored entries of any matrix. For dense and yale, this iterates over non-zero\n * entries; for list, this iterates over non-default entries. Yields dim+1 values for each entry:\n * i, j, ..., and the entry itself.\n */\nstatic VALUE nm_map_stored(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  VALUE to_return = Qnil;\n\n  switch(NM_STYPE(nmatrix)) {\n  case nm::YALE_STORE:\n    to_return = nm_yale_map_stored(nmatrix);\n    break;\n  case nm::DENSE_STORE:\n    to_return = nm_dense_map(nmatrix);\n    break;\n  case nm::LIST_STORE:\n    to_return = nm_list_map_stored(nmatrix, Qnil);\n    break;\n  default:\n    NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n    rb_raise(nm_eDataTypeError, \"Not a proper storage type\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return to_return;\n}\n\n/*\n * call-seq:\n *     each_ordered_stored_with_indices -> Enumerator\n *\n * Very similar to #each_stored_with_indices. 
The key difference is that it enforces matrix ordering rather\n * than storage ordering, which only matters if your matrix is Yale.\n */\nstatic VALUE nm_each_ordered_stored_with_indices(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  VALUE to_return = Qnil;\n\n  switch(NM_STYPE(nmatrix)) {\n  case nm::YALE_STORE:\n    to_return = nm_yale_each_ordered_stored_with_indices(nmatrix);\n    break;\n  case nm::DENSE_STORE:\n    to_return = nm_dense_each_with_indices(nmatrix);\n    break;\n  case nm::LIST_STORE:\n    to_return = nm_list_each_with_indices(nmatrix, true);\n    break;\n  default:\n    NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n    rb_raise(nm_eDataTypeError, \"Not a proper storage type\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return to_return;\n}\n\n\n/*\n * Equality operator. Returns a single true or false value indicating whether\n * the matrices are equivalent.\n *\n * For elementwise, use =~ instead.\n *\n * This method will raise an exception if dimensions do not match.\n *\n * When stypes differ, this function calls a protected Ruby method.\n */\nstatic VALUE nm_eqeq(VALUE left, VALUE right) {\n  NM_CONSERVATIVE(nm_register_value(&left));\n  NM_CONSERVATIVE(nm_register_value(&right));\n\n  NMATRIX *l, *r;\n\n  CheckNMatrixType(left);\n  CheckNMatrixType(right);\n\n  UnwrapNMatrix(left, l);\n  UnwrapNMatrix(right, r);\n\n  bool result = false;\n\n  // Check that the shapes match before going any further.\n  if (l->storage->dim != r->storage->dim) {\n    NM_CONSERVATIVE(nm_unregister_value(&left));\n    NM_CONSERVATIVE(nm_unregister_value(&right));\n    rb_raise(nm_eShapeError, \"cannot compare matrices with different dimension\");\n  }\n\n  size_t dim = l->storage->dim;\n  for (size_t i=0; i<dim; i++) {\n    if (l->storage->shape[i] != r->storage->shape[i]) {\n      NM_CONSERVATIVE(nm_unregister_value(&left));\n      NM_CONSERVATIVE(nm_unregister_value(&right));\n      rb_raise(nm_eShapeError, \"cannot 
compare matrices with different shapes\");\n    }\n  }\n\n  if (l->stype != r->stype) { // DIFFERENT STYPES\n\n    if (l->stype == nm::DENSE_STORE)\n      result = rb_funcall(left, rb_intern(\"dense_eql_sparse?\"), 1, right);\n    else if (r->stype == nm::DENSE_STORE)\n      result = rb_funcall(right, rb_intern(\"dense_eql_sparse?\"), 1, left);\n    else\n      result = rb_funcall(left, rb_intern(\"sparse_eql_sparse?\"), 1, right);\n\n  } else {\n\n    switch(l->stype) {       // SAME STYPES\n    case nm::DENSE_STORE:\n      result = nm_dense_storage_eqeq(l->storage, r->storage);\n      break;\n    case nm::LIST_STORE:\n      result = nm_list_storage_eqeq(l->storage, r->storage);\n      break;\n    case nm::YALE_STORE:\n      result = nm_yale_storage_eqeq(l->storage, r->storage);\n      break;\n    }\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&left));\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n\n  return result ? Qtrue : Qfalse;\n}\n\nDEF_ELEMENTWISE_RUBY_ACCESSOR(ADD, add)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(SUB, subtract)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(MUL, multiply)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(DIV, divide)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(POW, power)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(MOD, mod)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(EQEQ, eqeq)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(NEQ, neq)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(LEQ, leq)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(GEQ, geq)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(LT, lt)\nDEF_ELEMENTWISE_RUBY_ACCESSOR(GT, gt)\n\nDEF_UNARY_RUBY_ACCESSOR(SIN, sin)\nDEF_UNARY_RUBY_ACCESSOR(COS, cos)\nDEF_UNARY_RUBY_ACCESSOR(TAN, tan)\nDEF_UNARY_RUBY_ACCESSOR(ASIN, asin)\nDEF_UNARY_RUBY_ACCESSOR(ACOS, acos)\nDEF_UNARY_RUBY_ACCESSOR(ATAN, atan)\nDEF_UNARY_RUBY_ACCESSOR(SINH, sinh)\nDEF_UNARY_RUBY_ACCESSOR(COSH, cosh)\nDEF_UNARY_RUBY_ACCESSOR(TANH, tanh)\nDEF_UNARY_RUBY_ACCESSOR(ASINH, asinh)\nDEF_UNARY_RUBY_ACCESSOR(ACOSH, acosh)\nDEF_UNARY_RUBY_ACCESSOR(ATANH, atanh)\nDEF_UNARY_RUBY_ACCESSOR(EXP, exp)\nDEF_UNARY_RUBY_ACCESSOR(LOG2, 
log2)\nDEF_UNARY_RUBY_ACCESSOR(LOG10, log10)\nDEF_UNARY_RUBY_ACCESSOR(SQRT, sqrt)\nDEF_UNARY_RUBY_ACCESSOR(ERF, erf)\nDEF_UNARY_RUBY_ACCESSOR(ERFC, erfc)\nDEF_UNARY_RUBY_ACCESSOR(CBRT, cbrt)\nDEF_UNARY_RUBY_ACCESSOR(GAMMA, gamma)\nDEF_UNARY_RUBY_ACCESSOR(NEGATE, negate)\nDEF_UNARY_RUBY_ACCESSOR(FLOOR, floor)\nDEF_UNARY_RUBY_ACCESSOR(CEIL, ceil)\n\nDEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(ATAN2, atan2)\nDEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(LDEXP, ldexp)\nDEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(HYPOT, hypot)\n\nstatic VALUE nm_unary_log(int argc, VALUE* argv, VALUE self) {\n  NM_CONSERVATIVE(nm_register_values(argv, argc));\n  const double default_log_base = exp(1.0);\n  NMATRIX* left;\n  UnwrapNMatrix(self, left);\n  std::string sym;\n\n  switch(left->stype) {\n  case nm::DENSE_STORE:\n    sym = \"__dense_unary_log__\";\n    break;\n  case nm::YALE_STORE:\n    sym = \"__yale_unary_log__\";\n    break;\n  case nm::LIST_STORE:\n    sym = \"__list_unary_log__\";\n    break;\n  }\n  NM_CONSERVATIVE(nm_unregister_values(argv, argc));\n  if (argc > 0) { //supplied a base\n    return rb_funcall(self, rb_intern(sym.c_str()), 1, argv[0]);\n  }\n  return rb_funcall(self, rb_intern(sym.c_str()), 1, nm::RubyObject(default_log_base).rval);\n}\n\nstatic VALUE nm_unary_round(int argc, VALUE* argv, VALUE self) {\n  NM_CONSERVATIVE(nm_register_values(argv, argc));\n  const int default_precision = 0;\n  NMATRIX* left;\n  UnwrapNMatrix(self, left);\n  std::string sym;\n\n  switch(left->stype) {\n  case nm::DENSE_STORE:\n    sym = \"__dense_unary_round__\";\n    break;\n  case nm::YALE_STORE:\n    sym = \"__yale_unary_round__\";\n    break;\n  case nm::LIST_STORE:\n    sym = \"__list_unary_round__\";\n    break;\n  }\n  NM_CONSERVATIVE(nm_unregister_values(argv, argc));\n  if (argc > 0) { //supplied precision\n    return rb_funcall(self, rb_intern(sym.c_str()), 1, argv[0]);\n  }\n  return rb_funcall(self, rb_intern(sym.c_str()), 1, 
nm::RubyObject(default_precision).rval);\n}\n\n//DEF_ELEMENTWISE_RUBY_ACCESSOR(ATAN2, atan2)\n//DEF_ELEMENTWISE_RUBY_ACCESSOR(LDEXP, ldexp)\n//DEF_ELEMENTWISE_RUBY_ACCESSOR(HYPOT, hypot)\n\n/*\n * call-seq:\n *     hermitian? -> Boolean\n *\n * Is this matrix hermitian?\n *\n * Definition: http://en.wikipedia.org/wiki/Hermitian_matrix\n *\n * For non-complex matrices, this function should return the same result as symmetric?.\n */\nstatic VALUE nm_hermitian(VALUE self) {\n  return is_symmetric(self, true);\n}\n\n\n/*\n * call-seq:\n *     complex_conjugate_bang -> NMatrix\n *\n * Transform the matrix (in-place) to its complex conjugate. Only works on complex matrices.\n *\n * Bang should imply that no copy is being made, even temporarily.\n */\nstatic VALUE nm_complex_conjugate_bang(VALUE self) {\n\n  NMATRIX* m;\n  void* elem;\n  size_t size, p;\n\n  UnwrapNMatrix(self, m);\n\n  if (m->stype == nm::DENSE_STORE) {\n\n    size = nm_storage_count_max_elements(NM_STORAGE(self));\n    elem = NM_STORAGE_DENSE(self)->elements;\n\n  } else if (m->stype == nm::YALE_STORE) {\n\n    size = nm_yale_storage_get_size(NM_STORAGE_YALE(self));\n    elem = NM_STORAGE_YALE(self)->a;\n\n  } else {\n    rb_raise(rb_eNotImpError, \"please cast to yale or dense (complex) first\");\n  }\n\n  // Walk through and negate the imaginary component\n  if (NM_DTYPE(self) == nm::COMPLEX64) {\n\n    for (p = 0; p < size; ++p) {\n      reinterpret_cast<nm::Complex64*>(elem)[p].i = -reinterpret_cast<nm::Complex64*>(elem)[p].i;\n    }\n\n  } else if (NM_DTYPE(self) == nm::COMPLEX128) {\n\n    for (p = 0; p < size; ++p) {\n      reinterpret_cast<nm::Complex128*>(elem)[p].i = -reinterpret_cast<nm::Complex128*>(elem)[p].i;\n    }\n\n  }\n  return self;\n}\n\n/*\n * call-seq:\n *     __reshape!__ -> NMatrix\n *\n * Reshapes the matrix (in-place) to the desired shape. 
Note that this function does not do a resize; the product of\n * the new and old shapes' components must be equal.\n *\n */\nstatic VALUE nm_reshape_bang(VALUE self, VALUE arg){\n  NMATRIX* m;\n  UnwrapNMatrix(self, m);\n  if(m->stype == nm::DENSE_STORE){\n    DENSE_STORAGE* s   = NM_STORAGE_DENSE(self);\n    VALUE shape_ary = arg;\n    size_t dim;\n    size_t size = nm_storage_count_max_elements(s);\n    size_t new_size = 1;\n    size_t* shape = interpret_shape(shape_ary, &dim);\n    for (size_t index = 0; index < dim; ++index){\n      new_size *= shape[index];}\n\n    if (size == new_size){\n      s->shape = shape;\n      s->dim = dim;\n      NM_FREE(s->offset);\n      s->offset = NM_ALLOC_N(size_t, dim);\n      memset(s->offset, 0, sizeof(size_t)*dim);\n      size_t i, j;\n      size_t* stride = NM_ALLOC_N(size_t, dim);\n      for (i = 0; i < dim; ++i) {\n        stride[i] = 1;\n        for (j = i+1; j < dim; ++j) {\n          stride[i] *= shape[j];\n        }\n      }\n      NM_FREE(s->stride);\n      s->stride = stride;\n      return self;\n     }\n     else\n       rb_raise(rb_eArgError, \"reshape cannot resize; size of new and old matrices must match\");\n  }\n  else {\n    rb_raise(rb_eNotImpError, \"reshape in place only for dense stype\");\n  }\n}\n\n/*\n * Helper function for creating a matrix. 
You have to create the storage and pass it in, but you don't\n * need to worry about deleting it.\n */\nNMATRIX* nm_create(nm::stype_t stype, STORAGE* storage) {\n  nm_register_storage(stype, storage);\n  NMATRIX* mat = NM_ALLOC(NMATRIX);\n\n  mat->stype   = stype;\n  mat->storage = storage;\n\n  nm_unregister_storage(stype, storage);\n  return mat;\n}\n\n/*\n * @see nm_init\n */\nstatic VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {\n  NM_CONSERVATIVE(nm_register_values(argv, argc));\n  NM_CONSERVATIVE(nm_register_value(&self));\n  VALUE shape_ary, initial_ary, hash;\n  //VALUE shape_ary, default_val, capacity, initial_ary, dtype_sym, stype_sym;\n  // Mandatory args: shape, dtype, stype\n  rb_scan_args(argc, argv, \"11:\", &shape_ary, &initial_ary, &hash); // &stype_sym, &dtype_sym, &default_val, &capacity);\n\n  NM_CONSERVATIVE(nm_register_value(&shape_ary));\n  NM_CONSERVATIVE(nm_register_value(&initial_ary));\n  NM_CONSERVATIVE(nm_register_value(&hash));\n  // Get the shape.\n  size_t  dim;\n  size_t* shape = interpret_shape(shape_ary, &dim);\n  void*   init;\n  void*   v = NULL;\n  size_t  v_size = 0;\n\n  nm::stype_t stype = nm::DENSE_STORE;\n  nm::dtype_t dtype = nm::RUBYOBJ;\n  VALUE dtype_sym = Qnil, stype_sym = Qnil, default_val_num = Qnil, capacity_num = Qnil;\n  size_t capacity = 0;\n  if (!NIL_P(hash)) {\n    dtype_sym       = rb_hash_aref(hash, ID2SYM(nm_rb_dtype));\n    stype_sym       = rb_hash_aref(hash, ID2SYM(nm_rb_stype));\n    capacity_num    = rb_hash_aref(hash, ID2SYM(nm_rb_capacity));\n    NM_CONSERVATIVE(nm_register_value(&capacity_num));\n    default_val_num = rb_hash_aref(hash, ID2SYM(nm_rb_default));\n    NM_CONSERVATIVE(nm_register_value(&default_val_num));\n  }\n\n  //     stype ||= :dense\n  stype = !NIL_P(stype_sym) ? 
nm_stype_from_rbsymbol(stype_sym) : nm::DENSE_STORE;\n\n  //     dtype ||= h[:dtype] || guess_dtype(initial_ary) || :object\n  if (NIL_P(initial_ary) && NIL_P(dtype_sym))\n    dtype = nm::RUBYOBJ;\n  else if (NIL_P(dtype_sym))\n    dtype = nm_dtype_guess(initial_ary);\n  else\n    dtype = nm_dtype_from_rbsymbol(dtype_sym);\n\n  //   if stype != :dense\n  //     if initial_ary.nil?\n  //       init = h[:default] || 0\n  //     elsif initial_ary.is_a?(Array)\n  //       init = initial_ary.size > 1 ? (h[:default] || 0) : initial_ary[0]\n  //     else\n  //       init = initial_ary # not an array, just a value\n  //     end\n  //   end\n  if (stype != nm::DENSE_STORE) {\n    if (!NIL_P(default_val_num))\n      init = rubyobj_to_cval(default_val_num, dtype);\n    else if (NIL_P(initial_ary))\n      init = NULL;\n    else if (RB_TYPE_P(initial_ary, T_ARRAY))\n      init = RARRAY_LEN(initial_ary) == 1 ? rubyobj_to_cval(rb_ary_entry(initial_ary, 0), dtype) : NULL;\n    else\n      init = rubyobj_to_cval(initial_ary, dtype);\n\n    if (dtype == nm::RUBYOBJ) {\n      nm_register_values(reinterpret_cast<VALUE*>(init), 1);\n    }\n  }\n\n  // capacity = h[:capacity] || 0\n  if (stype == nm::YALE_STORE) {\n    if (!NIL_P(capacity_num)) capacity = FIX2INT(capacity_num);\n  }\n\n  if (!NIL_P(initial_ary)) {\n\n    if (RB_TYPE_P(initial_ary, T_ARRAY)) v_size = RARRAY_LEN(initial_ary);\n    else                                 v_size = 1;\n\n    v = interpret_initial_value(initial_ary, dtype);\n\n    if (dtype == nm::RUBYOBJ) {\n      nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n    }\n  }\n\n  // :object matrices MUST be initialized.\n  else if (stype == nm::DENSE_STORE && dtype == nm::RUBYOBJ) {\n    // Pretend [nil] was passed for RUBYOBJ.\n    v          = NM_ALLOC(VALUE);\n    *(VALUE*)v = Qnil;\n\n    v_size = 1;\n\n  }\n\n  NMATRIX* nmatrix;\n  UnwrapNMatrix(self, nmatrix);\n\n  nmatrix->stype = stype;\n\n  switch (stype) {\n    case nm::DENSE_STORE:\n      
nmatrix->storage = (STORAGE*)nm_dense_storage_create(dtype, shape, dim, v, v_size);\n      break;\n\n    case nm::LIST_STORE:\n      nmatrix->storage = (STORAGE*)nm_list_storage_create(dtype, shape, dim, init);\n      break;\n\n    case nm::YALE_STORE:\n      nmatrix->storage = (STORAGE*)nm_yale_storage_create(dtype, shape, dim, capacity);\n      nm_yale_storage_init((YALE_STORAGE*)(nmatrix->storage), init);\n      break;\n  }\n\n  nm_register_storage(stype, nmatrix->storage);\n\n  // If we're not creating a dense, and an initial array was provided, use that and multi-slice-set\n  // to set the contents of the matrix right now.\n  if (stype != nm::DENSE_STORE && v_size > 1) {\n    VALUE* slice_argv = NM_ALLOCA_N(VALUE, dim);\n    nm_register_values(slice_argv, dim);\n    size_t* tmp_shape = NM_ALLOC_N(size_t, dim);\n    for (size_t m = 0; m < dim; ++m) {\n      slice_argv[m] = ID2SYM(nm_rb_mul); // :* -- full range\n      tmp_shape[m]  = shape[m];\n    }\n\n    SLICE slice_s;\n    SLICE* slice = &slice_s;\n    slice->coords = NM_ALLOCA_N(size_t, dim);\n    slice->lengths = NM_ALLOCA_N(size_t, dim);\n    init_slice_no_alloc(slice, dim, dim, slice_argv, shape);\n\n    // Create a temporary dense matrix and use it to do a slice assignment on self.\n    NMATRIX* tmp = nm_create(nm::DENSE_STORE, (STORAGE*)nm_dense_storage_create(dtype, tmp_shape, dim, v, v_size));\n    nm_register_nmatrix(tmp);\n    VALUE rb_tmp = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, tmp);\n    nm_unregister_nmatrix(tmp);\n    nm_register_value(&rb_tmp);\n    if (stype == nm::YALE_STORE)  nm_yale_storage_set(self, slice, rb_tmp);\n    else                          nm_list_storage_set(self, slice, rb_tmp);\n\n    // We need to free v if it's not the same size as tmp -- because tmp will have made a copy instead.\n    //if (nm_storage_count_max_elements(tmp->storage) != v_size)\n    //  NM_FREE(v);\n\n    // nm_delete(tmp); // This seems to enrage the garbage collector (because rb_tmp is 
still available). It'd be better if we could force it to free immediately, but no sweat.\n\n    nm_unregister_value(&rb_tmp);\n    nm_unregister_values(slice_argv, dim);\n  }\n\n  if (!NIL_P(initial_ary) && dtype == nm::RUBYOBJ) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);\n  }\n\n  if (stype != nm::DENSE_STORE && dtype == nm::RUBYOBJ) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(init), 1);\n  }\n\n  if (!NIL_P(hash)) {\n    NM_CONSERVATIVE(nm_unregister_value(&capacity_num));\n    NM_CONSERVATIVE(nm_unregister_value(&default_val_num));\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&shape_ary));\n  NM_CONSERVATIVE(nm_unregister_value(&initial_ary));\n  NM_CONSERVATIVE(nm_unregister_value(&hash));\n\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  NM_CONSERVATIVE(nm_unregister_values(argv, argc));\n  nm_unregister_storage(stype, nmatrix->storage);\n\n  return self;\n}\n\n/*\n * call-seq:\n *     new(shape) -> NMatrix\n *     new(shape, initial_value) -> NMatrix\n *     new(shape, initial_array) -> NMatrix\n *     new(shape, initial_value, options) -> NMatrix\n *     new(shape, initial_array, options) -> NMatrix\n *\n * Create a new NMatrix.\n *\n * The only mandatory argument is shape, which may be a positive integer or an array of positive integers.\n *\n * It is recommended that you supply an initialization value or array of values. Without one, Yale and List matrices will\n * be initialized to 0; and dense matrices will be undefined.\n *\n * Additional options may be provided using keyword arguments. The keywords are +:dtype, +:stype+, +:capacity+, and\n * +:default+. Only Yale uses a capacity argument, which is used to reserve the initial size of its storage vectors.\n * List and Yale both accept a default value (which itself defaults to 0). 
This default is taken from the initial value\n * if such a value is given; it is more likely to be required when an initial array is provided.\n *\n * The storage type, or stype, is used to specify whether we want a +:dense+, +:list+, or +:yale+ matrix; dense is the\n * default.\n *\n * The data type, or dtype, can be one of: :byte, :int8, :int16, :int32, :int64, :float32, :float64, :complex64,\n * :complex128, or :object. The constructor will attempt to guess it from the initial value/array/default\n * provided, if any. Otherwise, the default is :object, which stores any type of Ruby object.\n *\n * In addition to the above, there is a legacy constructor from the alpha version. To use that version, you must be\n * providing exactly four arguments. It is now deprecated.\n *\n * There is one additional constructor for advanced users, which takes seven arguments and is only for creating Yale\n * matrices with known IA, JA, and A arrays. This is used primarily internally for IO, e.g., reading Matlab matrices,\n * which are stored in old Yale (not our Yale) format. But be careful; there are no overflow warnings. All of these\n * constructors are defined for power-users. 
Everyone else should probably resort to the shortcut functions defined in
 * shortcuts.rb.
 */
static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
  NM_CONSERVATIVE(nm_register_value(&nm));
  NM_CONSERVATIVE(nm_register_values(argv, argc));

  if (argc <= 3) { // Call the new constructor unless all four arguments are given (or the 7-arg version is given)
    NM_CONSERVATIVE(nm_unregister_values(argv, argc));
    NM_CONSERVATIVE(nm_unregister_value(&nm));
    return nm_init_new_version(argc, argv, nm);
  }

  /* First, determine stype (dense by default) */
  nm::stype_t stype;
  size_t  offset = 0;

  if (!SYMBOL_P(argv[0]) && !RB_TYPE_P(argv[0], T_STRING)) {
    stype = nm::DENSE_STORE;

  } else {
    // 0: String or Symbol
    stype  = interpret_stype(argv[0]);
    offset = 1;
  }

  // If there are 7 arguments and Yale, refer to a different init function with fewer sanity checks.
  if (argc == 7) {
    if (stype == nm::YALE_STORE) {
      NM_CONSERVATIVE(nm_unregister_values(argv, argc));
      NM_CONSERVATIVE(nm_unregister_value(&nm));
      return nm_init_yale_from_old_yale(argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], nm);

    } else {
      NM_CONSERVATIVE(nm_unregister_values(argv, argc));
      NM_CONSERVATIVE(nm_unregister_value(&nm));
      rb_raise(rb_eArgError, "Expected 2-4 arguments (or 7 for internal Yale creation)");
    }
  }

  // 1: Array or Fixnum
  size_t dim;
  size_t* shape = interpret_shape(argv[offset], &dim);

  // 2-3: dtype
  nm::dtype_t dtype = interpret_dtype(argc-1-offset, argv+offset+1, stype);

  size_t init_cap = 0, init_val_len = 0;
  void* init_val  = NULL;
  // argc is 4..6 at this point (<=3 and ==7 returned above), so argv[1+offset] always exists.
  if (!SYMBOL_P(argv[1+offset]) || RB_TYPE_P(argv[1+offset], T_ARRAY)) {
    // Initial value provided (could also be initial capacity, if yale).

    if (stype == nm::YALE_STORE && NM_RUBYVAL_IS_NUMERIC(argv[1+offset])) {
      init_cap = FIX2UINT(argv[1+offset]);

    } else {
      // 4: initial value / dtype
      init_val = interpret_initial_value(argv[1+offset], dtype);

      if (RB_TYPE_P(argv[1+offset], T_ARRAY)) init_val_len = RARRAY_LEN(argv[1+offset]);
      else                                    init_val_len = 1;
    }

  } else {
    // DType is RUBYOBJ.

    if (stype == nm::DENSE_STORE) {
      /*
       * No need to initialize dense with any kind of default value unless it's
       * an RUBYOBJ matrix.
       */
      if (dtype == nm::RUBYOBJ) {
        // Pretend [nil] was passed for RUBYOBJ.
        init_val = NM_ALLOC(VALUE);
        *(VALUE*)init_val = Qnil;

        init_val_len = 1;

      } else {
        init_val = NULL;
      }
    } else if (stype == nm::LIST_STORE) {
      init_val = NM_ALLOC_N(char, DTYPE_SIZES[dtype]);
      std::memset(init_val, 0, DTYPE_SIZES[dtype]);
    }
  }

  // Keep any Ruby objects held in init_val registered while storage is created below.
  if (dtype == nm::RUBYOBJ) {
    nm_register_values(reinterpret_cast<VALUE*>(init_val), init_val_len);
  }

  // TODO: Update to allow an array as the initial value.
  NMATRIX* nmatrix;
  UnwrapNMatrix(nm, nmatrix);

  nmatrix->stype = stype;

  switch (stype) {
    case nm::DENSE_STORE:
      nmatrix->storage = (STORAGE*)nm_dense_storage_create(dtype, shape, dim, init_val, init_val_len);
      break;

    case nm::LIST_STORE:
      nmatrix->storage = (STORAGE*)nm_list_storage_create(dtype, shape, dim, init_val);
      break;

    case nm::YALE_STORE:
      nmatrix->storage = (STORAGE*)nm_yale_storage_create(dtype, shape, dim, init_cap);
      nm_yale_storage_init((YALE_STORAGE*)(nmatrix->storage), NULL);
      break;
  }

  if (dtype == nm::RUBYOBJ) {
    nm_unregister_values(reinterpret_cast<VALUE*>(init_val), init_val_len);
  }

  NM_CONSERVATIVE(nm_unregister_values(argv, argc));
  NM_CONSERVATIVE(nm_unregister_value(&nm));

  return nm;
}


/*
 * Helper for nm_cast_with_types which uses the C types instead of the Ruby objects. 
\n * Called by nm_cast_with_types.\n */\nNMATRIX* nm_cast_with_ctype_args(NMATRIX* self, nm::stype_t new_stype, nm::dtype_t new_dtype, void* init_ptr) {\n\n  nm_register_nmatrix(self);\n\n  NMATRIX* lhs = NM_ALLOC(NMATRIX);\n  lhs->stype   = new_stype;\n\n  // Copy the storage\n  CAST_TABLE(cast_copy);\n  lhs->storage = cast_copy[lhs->stype][self->stype](self->storage, new_dtype, init_ptr);\n\n  nm_unregister_nmatrix(self);\n\n  return lhs;\n}\n\n/*\n * Cast NMatrix with given new_stype and new_dtype. Called by nm_cast.\n */\nVALUE nm_cast_with_types(VALUE self, nm::stype_t new_stype, nm::dtype_t new_dtype, \n        void* init_ptr) {\n  NMATRIX *rhs;\n\n  UnwrapNMatrix( self, rhs );\n\n  NMATRIX* m = nm_cast_with_ctype_args(rhs, new_stype, new_dtype, init_ptr);\n  nm_register_nmatrix(m);\n\n  VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);\n\n  nm_unregister_nmatrix(m);\n  return to_return;\n}\n\n/*\n * call-seq:\n *     cast_full(stype) -> NMatrix\n *     cast_full(stype, dtype, sparse_basis) -> NMatrix\n *\n * Copy constructor for changing dtypes and stypes.\n */\nVALUE nm_cast(VALUE self, VALUE new_stype_symbol, VALUE new_dtype_symbol, VALUE init) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n  NM_CONSERVATIVE(nm_register_value(&init));\n\n  nm::dtype_t new_dtype = nm_dtype_from_rbsymbol(new_dtype_symbol);\n  nm::stype_t new_stype = nm_stype_from_rbsymbol(new_stype_symbol);\n\n  CheckNMatrixType(self);\n  void* init_ptr = NM_ALLOCA_N(char, DTYPE_SIZES[new_dtype]);\n  rubyval_to_cval(init, new_dtype, init_ptr);\n\n  VALUE to_return = nm_cast_with_types(self, new_stype, new_dtype, init_ptr);\n\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  NM_CONSERVATIVE(nm_unregister_value(&init));\n  return to_return;\n\n}\n\n/*\n * Copy constructor for transposing.\n */\nstatic VALUE nm_init_transposed(VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  static STORAGE* (*storage_copy_transposed[nm::NUM_STYPES])(const STORAGE* 
rhs_base) = {\n    nm_dense_storage_copy_transposed,\n    nm_list_storage_copy_transposed,\n    nm_yale_storage_copy_transposed\n  };\n\n  NMATRIX* lhs = nm_create( NM_STYPE(self),\n                            storage_copy_transposed[NM_STYPE(self)]( NM_STORAGE(self) )\n                          );\n  nm_register_nmatrix(lhs);\n  VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, lhs);\n\n  nm_unregister_nmatrix(lhs);\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  return to_return;\n}\n\n/*\n * Copy constructor for no change of dtype or stype (used for #initialize_copy hook).\n */\nstatic VALUE nm_init_copy(VALUE copy, VALUE original) {\n  NM_CONSERVATIVE(nm_register_value(&copy));\n  NM_CONSERVATIVE(nm_register_value(&original));\n\n  NMATRIX *lhs, *rhs;\n\n  CheckNMatrixType(original);\n\n  if (copy == original) {\n    NM_CONSERVATIVE(nm_unregister_value(&copy));\n    NM_CONSERVATIVE(nm_unregister_value(&original));\n    return copy;\n  }\n\n  UnwrapNMatrix( original, rhs );\n  UnwrapNMatrix( copy,     lhs );\n\n  lhs->stype = rhs->stype;\n\n  // Copy the storage\n  CAST_TABLE(ttable);\n  lhs->storage = ttable[lhs->stype][rhs->stype](rhs->storage, rhs->storage->dtype, NULL);\n\n  NM_CONSERVATIVE(nm_unregister_value(&copy));\n  NM_CONSERVATIVE(nm_unregister_value(&original));\n\n  return copy;\n}\n\n/*\n * Get major, minor, and release components of NMatrix::VERSION. Store in function parameters. Doesn't get\n * the \"pre\" field currently (beta1/rc1/etc).\n */\nstatic void get_version_info(uint16_t& major, uint16_t& minor, uint16_t& release) {\n  // Get VERSION and split it on periods. 
Result is an Array.\n  VALUE cVersion = rb_const_get(cNMatrix, rb_intern(\"VERSION\"));\n\n  // Convert each to an integer\n  major   = FIX2INT(rb_const_get(cVersion, rb_intern(\"MAJOR\")));\n  minor   = FIX2INT(rb_const_get(cVersion, rb_intern(\"MINOR\")));\n  release = FIX2INT(rb_const_get(cVersion, rb_intern(\"TINY\")));\n}\n\n\n/*\n * Interpret the NMatrix::write symmetry argument (which should be nil or a symbol). Return a symm_t (enum).\n */\nstatic nm::symm_t interpret_symm(VALUE symm) {\n  if (symm == Qnil) return nm::NONSYMM;\n\n  ID rb_symm = rb_intern(\"symmetric\"),\n     rb_skew = rb_intern(\"skew\"),\n     rb_herm = rb_intern(\"hermitian\");\n     // nm_rb_upper, nm_rb_lower already set\n\n  ID symm_id = rb_to_id(symm);\n\n  if (symm_id == rb_symm)            return nm::SYMM;\n  else if (symm_id == rb_skew)       return nm::SKEW;\n  else if (symm_id == rb_herm)       return nm::HERM;\n  else if (symm_id == nm_rb_upper)   return nm::UPPER;\n  else if (symm_id == nm_rb_lower)   return nm::LOWER;\n  else                            rb_raise(rb_eArgError, \"unrecognized symmetry argument\");\n\n  return nm::NONSYMM;\n}\n\n\n\nvoid read_padded_shape(std::ifstream& f, size_t dim, size_t* shape) {\n  size_t bytes_read = 0;\n\n  // Read shape\n  for (size_t i = 0; i < dim; ++i) {\n    size_t s;\n    f.read(reinterpret_cast<char*>(&s), sizeof(size_t));\n    shape[i] = s;\n\n    bytes_read += sizeof(size_t);\n  }\n\n  // Ignore padding\n  f.ignore(bytes_read % 8);\n}\n\n\nvoid write_padded_shape(std::ofstream& f, size_t dim, size_t* shape) {\n  size_t bytes_written = 0;\n\n  // Write shape\n  for (size_t i = 0; i < dim; ++i) {\n    size_t s = shape[i];\n    f.write(reinterpret_cast<const char*>(&s), sizeof(size_t));\n\n    bytes_written += sizeof(size_t);\n  }\n\n  // Pad with zeros\n  size_t zero = 0;\n  while (bytes_written % 8) {\n    f.write(reinterpret_cast<const char*>(&zero), sizeof(size_t));\n\n    bytes_written += sizeof(IType);\n  }\n}\n\n\nvoid 
read_padded_yale_elements(std::ifstream& f, YALE_STORAGE* storage, size_t length, nm::symm_t symm, nm::dtype_t dtype) {\n  NAMED_DTYPE_TEMPLATE_TABLE_NO_ROBJ(ttable, nm::read_padded_yale_elements, void, std::ifstream&, YALE_STORAGE*, size_t, nm::symm_t)\n\n  ttable[dtype](f, storage, length, symm);\n}\n\n\nvoid write_padded_yale_elements(std::ofstream& f, YALE_STORAGE* storage, size_t length, nm::symm_t symm, nm::dtype_t dtype) {\n  NAMED_DTYPE_TEMPLATE_TABLE_NO_ROBJ(ttable, nm::write_padded_yale_elements, void, std::ofstream& f, YALE_STORAGE*, size_t, nm::symm_t)\n\n  ttable[dtype](f, storage, length, symm);\n}\n\n\nvoid read_padded_dense_elements(std::ifstream& f, DENSE_STORAGE* storage, nm::symm_t symm, nm::dtype_t dtype) {\n  NAMED_DTYPE_TEMPLATE_TABLE_NO_ROBJ(ttable, nm::read_padded_dense_elements, void, std::ifstream&, DENSE_STORAGE*, nm::symm_t)\n\n  ttable[dtype](f, storage, symm);\n}\n\n\nvoid write_padded_dense_elements(std::ofstream& f, DENSE_STORAGE* storage, nm::symm_t symm, nm::dtype_t dtype) {\n  NAMED_DTYPE_TEMPLATE_TABLE_NO_ROBJ(ttable, nm::write_padded_dense_elements, void, std::ofstream& f, DENSE_STORAGE*, nm::symm_t)\n\n  ttable[dtype](f, storage, symm);\n}\n\n\n/*\n * Helper function to get exceptions in the module Errno (e.g., ENOENT). Example:\n *\n *     rb_raise(rb_get_errno_exc(\"ENOENT\"), RSTRING_PTR(filename));\n */\nstatic VALUE rb_get_errno_exc(const char* which) {\n  return rb_const_get(rb_const_get(rb_cObject, rb_intern(\"Errno\")), rb_intern(which));\n}\n\n\n\n/*\n * Binary file writer for NMatrix standard format. file should be a path, which we aren't going to\n * check very carefully (in other words, this function should generally be called from a Ruby\n * helper method). Function also takes a symmetry argument, which allows us to specify that we only want to\n * save the upper triangular portion of the matrix (or if the matrix is a lower triangular matrix, only\n * the lower triangular portion). 
nil means regular storage.
 */
static VALUE nm_write(int argc, VALUE* argv, VALUE self) {
  using std::ofstream;

  if (argc < 1 || argc > 2) {
    rb_raise(rb_eArgError, "Expected one or two arguments");
  }

  NM_CONSERVATIVE(nm_register_values(argv, argc));
  NM_CONSERVATIVE(nm_register_value(&self));

  VALUE file = argv[0],
        symm = argc == 1 ? Qnil : argv[1];

  NMATRIX* nmatrix;
  UnwrapNMatrix( self, nmatrix );

  nm::symm_t symm_ = interpret_symm(symm);

  if (nmatrix->storage->dtype == nm::RUBYOBJ) {
    NM_CONSERVATIVE(nm_unregister_values(argv, argc));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    rb_raise(rb_eNotImpError, "Ruby Object writing is not implemented yet");
  }

  // Get the dtype, stype, itype, and symm and ensure they're the correct number of bytes.
  uint8_t st = static_cast<uint8_t>(nmatrix->stype),
          dt = static_cast<uint8_t>(nmatrix->storage->dtype),
          sm = static_cast<uint8_t>(symm_);
  uint16_t dim = nmatrix->storage->dim;

  //FIXME: Cast the matrix to the smallest possible index type. Write that in the place of IType.

  // Check arguments before starting to write.
  if (nmatrix->stype == nm::LIST_STORE) {
    NM_CONSERVATIVE(nm_unregister_values(argv, argc));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    rb_raise(nm_eStorageTypeError, "cannot save list matrix; cast to yale or dense first");
  }
  if (symm_ != nm::NONSYMM) {
    // NOTE(review): these unregisters run even when none of the checks below
    // raises, and the function unregisters again at the end — verify that
    // nm_unregister_* tolerates the unbalanced call on the fall-through path.
    NM_CONSERVATIVE(nm_unregister_values(argv, argc));
    NM_CONSERVATIVE(nm_unregister_value(&self));

    if (dim != 2) rb_raise(rb_eArgError, "symmetry/triangularity not defined for a non-2D matrix");
    if (nmatrix->storage->shape[0] != nmatrix->storage->shape[1])
      rb_raise(rb_eArgError, "symmetry/triangularity not defined for a non-square matrix");
    if (symm_ == nm::HERM &&
          dt != static_cast<uint8_t>(nm::COMPLEX64) && dt != static_cast<uint8_t>(nm::COMPLEX128) && dt != static_cast<uint8_t>(nm::RUBYOBJ))
      rb_raise(rb_eArgError, "cannot save a non-complex matrix as hermitian");
  }

  // NOTE(review): the stream's open/write status is never checked; a bad path
  // silently writes nothing and the function still returns Qtrue.
  ofstream f(RSTRING_PTR(file), std::ios::out | std::ios::binary);

  // Get the NMatrix version information.
  uint16_t major, minor, release, null16 = 0;
  get_version_info(major, minor, release);

  // WRITE FIRST 64-BIT BLOCK
  f.write(reinterpret_cast<const char*>(&major),   sizeof(uint16_t));
  f.write(reinterpret_cast<const char*>(&minor),   sizeof(uint16_t));
  f.write(reinterpret_cast<const char*>(&release), sizeof(uint16_t));
  f.write(reinterpret_cast<const char*>(&null16),  sizeof(uint16_t));

  uint8_t ZERO = 0;
  // WRITE SECOND 64-BIT BLOCK
  f.write(reinterpret_cast<const char*>(&dt), sizeof(uint8_t));
  f.write(reinterpret_cast<const char*>(&st), sizeof(uint8_t));
  f.write(reinterpret_cast<const char*>(&ZERO),sizeof(uint8_t));
  f.write(reinterpret_cast<const char*>(&sm), sizeof(uint8_t));
  f.write(reinterpret_cast<const char*>(&null16), sizeof(uint16_t));
  f.write(reinterpret_cast<const char*>(&dim), sizeof(uint16_t));

  // Write shape (in 64-bit blocks)
  write_padded_shape(f, nmatrix->storage->dim, nmatrix->storage->shape);

  if (nmatrix->stype == nm::DENSE_STORE) {
    write_padded_dense_elements(f, reinterpret_cast<DENSE_STORAGE*>(nmatrix->storage), symm_, nmatrix->storage->dtype);
  } else if (nmatrix->stype == nm::YALE_STORE) {
    YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(nmatrix->storage);
    uint32_t ndnz   = s->ndnz,
             length = nm_yale_storage_get_size(s);
    f.write(reinterpret_cast<const char*>(&ndnz),   sizeof(uint32_t));
    f.write(reinterpret_cast<const char*>(&length), sizeof(uint32_t));

    write_padded_yale_elements(f, s, length, symm_, s->dtype);
  }

  f.close();

  NM_CONSERVATIVE(nm_unregister_values(argv, argc));
  NM_CONSERVATIVE(nm_unregister_value(&self));

  return Qtrue;
}


/*
 * Binary file reader for NMatrix standard format. file should be a path, which we aren't going to
 * check very carefully (in other words, this function should generally be called from a Ruby
 * helper method).
 *
 * Note that currently, this function will by default refuse to read files that are newer than
 * your version of NMatrix. 
To force an override, set the second argument to anything other than nil.\n *\n * Returns an NMatrix Ruby object.\n */\nstatic VALUE nm_read(int argc, VALUE* argv, VALUE self) {\n  using std::ifstream;\n\n  NM_CONSERVATIVE(nm_register_values(argv, argc));\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  VALUE file, force_;\n\n  // Read the arguments\n  rb_scan_args(argc, argv, \"11\", &file, &force_);\n  bool force   = (force_ != Qnil && force_ != Qfalse);\n\n\n  if (!RB_FILE_EXISTS(file)) { // FIXME: Errno::ENOENT\n    NM_CONSERVATIVE(nm_unregister_values(argv, argc));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    rb_raise(rb_get_errno_exc(\"ENOENT\"), \"%s\", RSTRING_PTR(file));\n  }\n\n  // Open a file stream\n  ifstream f(RSTRING_PTR(file), std::ios::in | std::ios::binary);\n\n  uint16_t major, minor, release;\n  get_version_info(major, minor, release); // compare to NMatrix version\n\n  uint16_t fmajor, fminor, frelease, null16;\n\n  // READ FIRST 64-BIT BLOCK\n  f.read(reinterpret_cast<char*>(&fmajor),   sizeof(uint16_t));\n  f.read(reinterpret_cast<char*>(&fminor),   sizeof(uint16_t));\n  f.read(reinterpret_cast<char*>(&frelease), sizeof(uint16_t));\n  f.read(reinterpret_cast<char*>(&null16),   sizeof(uint16_t));\n\n  int ver  = major * 10000 + minor * 100 + release,\n      fver = fmajor * 10000 + fminor * 100 + release;\n  if (fver > ver && force == false) {\n    NM_CONSERVATIVE(nm_unregister_values(argv, argc));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    rb_raise(rb_eIOError, \"File was created in newer version of NMatrix than current (%u.%u.%u)\", fmajor, fminor, frelease);\n  }\n  if (null16 != 0) rb_warn(\"nm_read: Expected zero padding was not zero (0)\\n\");\n\n  uint8_t dt, st, it, sm;\n  uint16_t dim;\n\n  // READ SECOND 64-BIT BLOCK\n  f.read(reinterpret_cast<char*>(&dt), sizeof(uint8_t));\n  f.read(reinterpret_cast<char*>(&st), sizeof(uint8_t));\n  f.read(reinterpret_cast<char*>(&it), sizeof(uint8_t)); // FIXME: should tell 
how few bytes indices are stored as
  f.read(reinterpret_cast<char*>(&sm), sizeof(uint8_t));
  f.read(reinterpret_cast<char*>(&null16), sizeof(uint16_t));
  f.read(reinterpret_cast<char*>(&dim), sizeof(uint16_t));

  if (null16 != 0) rb_warn("nm_read: Expected zero padding was not zero (1)");
  nm::stype_t stype = static_cast<nm::stype_t>(st);
  nm::dtype_t dtype = static_cast<nm::dtype_t>(dt);
  nm::symm_t  symm  = static_cast<nm::symm_t>(sm);
  //nm::itype_t itype = static_cast<nm::itype_t>(it);

  // READ NEXT FEW 64-BIT BLOCKS
  size_t* shape = NM_ALLOC_N(size_t, dim);
  read_padded_shape(f, dim, shape);

  STORAGE* s;
  if (stype == nm::DENSE_STORE) {
    s = nm_dense_storage_create(dtype, shape, dim, NULL, 0);
    nm_register_storage(stype, s);

    read_padded_dense_elements(f, reinterpret_cast<DENSE_STORAGE*>(s), symm, dtype);

  } else if (stype == nm::YALE_STORE) {
    uint32_t ndnz, length;

    // READ YALE-SPECIFIC 64-BIT BLOCK
    f.read(reinterpret_cast<char*>(&ndnz),     sizeof(uint32_t));
    f.read(reinterpret_cast<char*>(&length),   sizeof(uint32_t));

    s = nm_yale_storage_create(dtype, shape, dim, length); // set length as init capacity

    nm_register_storage(stype, s);

    read_padded_yale_elements(f, reinterpret_cast<YALE_STORAGE*>(s), length, symm, dtype);
  } else {
    // NOTE(review): message says "before saving" but this is the read path;
    // presumably it means the file contains an unrecognized stype byte.
    NM_CONSERVATIVE(nm_unregister_values(argv, argc));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    rb_raise(nm_eStorageTypeError, "please convert to yale or dense before saving");
  }

  NMATRIX* nm = nm_create(stype, s);

  // Return the appropriate matrix object (Ruby VALUE)
  // FIXME: This should probably return CLASS_OF(self) instead of cNMatrix, but I don't know how that works for
  // FIXME: class methods.
  nm_register_nmatrix(nm);
  VALUE to_return = Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, nm);

  nm_unregister_nmatrix(nm);
  NM_CONSERVATIVE(nm_unregister_values(argv, argc));
  NM_CONSERVATIVE(nm_unregister_value(&self));
  nm_unregister_storage(stype, s);

  switch(stype) {
  case nm::DENSE_STORE:
  case nm::YALE_STORE:
    return to_return;
  default: // this case never occurs (due to earlier rb_raise)
    return Qnil;
  }

}



/*
 * Create a new NMatrix helper for handling internal ia, ja, and a arguments.
 *
 * This constructor is only called by Ruby code, so we can skip most of the
 * checks.
 */
static VALUE nm_init_yale_from_old_yale(VALUE shape, VALUE dtype, VALUE ia, VALUE ja, VALUE a, VALUE from_dtype, VALUE nm) {
  size_t dim     = 2;
  size_t* shape_  = interpret_shape(shape, &dim);
  nm::dtype_t dtype_  = nm_dtype_from_rbsymbol(dtype);
  char *ia_       = RSTRING_PTR(ia),
       *ja_       = RSTRING_PTR(ja),
       *a_        = RSTRING_PTR(a);
  nm::dtype_t from_dtype_ = nm_dtype_from_rbsymbol(from_dtype);
  NMATRIX* nmatrix;

  UnwrapNMatrix( nm, nmatrix );

  nmatrix->stype   = nm::YALE_STORE;
  nmatrix->storage = (STORAGE*)nm_yale_storage_create_from_old_yale(dtype_, shape_, ia_, ja_, a_, from_dtype_);

  return nm;
}

/*
 * Check to determine whether matrix is a reference to another matrix.
 */
static VALUE nm_is_ref(VALUE self) {
  if (NM_SRC(self) == NM_STORAGE(self)) return Qfalse;
  return Qtrue;
}

/*
 * call-seq:
 *     slice -> ...
 *
 * Access the contents of an NMatrix at given coordinates, using copying.
 *
 *     n.slice(3,3)  # => 5.0
 *     n.slice(0..1,0..1) #=> matrix [2,2]
 *
 */
static VALUE nm_mget(int argc, VALUE* argv, VALUE self) {
  static void* (*ttable[nm::NUM_STYPES])(const STORAGE*, SLICE*) = {
    nm_dense_storage_get,
    nm_list_storage_get,
    nm_yale_storage_get
  };
  nm::stype_t stype = NM_STYPE(self);
  return nm_xslice(argc, argv, ttable[stype], nm_delete, self);
}

/*
 * call-seq:
 *     matrix[indices] -> ...
 *
 * Access the contents of an NMatrix at given coordinates by reference.
 *
 *     n[3,3]  # => 5.0
 *     n[0..1,0..1] #=> matrix [2,2]
 *
 */
static VALUE nm_mref(int argc, VALUE* argv, VALUE self) {
  static void* (*ttable[nm::NUM_STYPES])(const STORAGE*, SLICE*) = {
    nm_dense_storage_ref,
    nm_list_storage_ref,
    nm_yale_storage_ref
  };
  nm::stype_t stype = NM_STYPE(self);
  return nm_xslice(argc, argv, ttable[stype], nm_delete_ref, self);
}

/*
 * Modify the contents of an NMatrix in the given cell
 *
 *     n[3,3] = 5.0
 *
 * Also returns the new contents, so you can chain:
 *
 *     n[3,3] = n[2,3] = 5.0
 */
static VALUE nm_mset(int argc, VALUE* argv, VALUE self) {

  size_t dim = NM_DIM(self); // last arg is the value

  VALUE to_return = Qnil;

  if ((size_t)(argc) > NM_DIM(self)+1) {
    rb_raise(rb_eArgError, "wrong number of arguments (%d for %lu)", argc, effective_dim(NM_STORAGE(self))+1);
  } else {
    NM_CONSERVATIVE(nm_register_value(&self));
    NM_CONSERVATIVE(nm_register_values(argv, argc));

    // argv[0..argc-2] are the coordinates; argv[argc-1] is the value to assign.
    SLICE slice_s;
    SLICE* slice = &slice_s;
    slice->coords = NM_ALLOCA_N(size_t, dim);
    slice->lengths = NM_ALLOCA_N(size_t, dim);
    init_slice_no_alloc(slice, dim, argc-1, argv, NM_STORAGE(self)->shape);

    static void (*ttable[nm::NUM_STYPES])(VALUE, SLICE*, VALUE) = {
      nm_dense_storage_set,
      nm_list_storage_set,
      nm_yale_storage_set
    };

    ttable[NM_STYPE(self)](self, slice, argv[argc-1]);

    to_return = argv[argc-1];

    NM_CONSERVATIVE(nm_unregister_value(&self));
    NM_CONSERVATIVE(nm_unregister_values(argv, argc));
  }

  return to_return;
}

/*
 * Matrix multiply (dot product): against another matrix or a vector.
 *
 * For elementwise, use * instead.
 *
 * The two matrices must be of the same stype (for now). 
If dtype differs, an upcast will occur.
 */
static VALUE nm_multiply(VALUE left_v, VALUE right_v) {
  NM_CONSERVATIVE(nm_register_value(&left_v));
  NM_CONSERVATIVE(nm_register_value(&right_v));

  NMATRIX *left, *right;

  UnwrapNMatrix( left_v, left );

  if (NM_RUBYVAL_IS_NUMERIC(right_v)) {
    NM_CONSERVATIVE(nm_unregister_value(&left_v));
    NM_CONSERVATIVE(nm_unregister_value(&right_v));
    return matrix_multiply_scalar(left, right_v);
  }

  else if (RB_TYPE_P(right_v, T_ARRAY)) {
    NM_CONSERVATIVE(nm_unregister_value(&left_v));
    NM_CONSERVATIVE(nm_unregister_value(&right_v));
    rb_raise(rb_eNotImpError, "please convert array to nx1 or 1xn NMatrix first");
  }

  else { // both are matrices (probably)
    CheckNMatrixType(right_v);
    UnwrapNMatrix( right_v, right );

    // work like vector dot product for 1dim
    if (left->storage->dim == 1 && right->storage->dim == 1) {
      if (left->storage->shape[0] != right->storage->shape[0]) {
        NM_CONSERVATIVE(nm_unregister_value(&left_v));
        NM_CONSERVATIVE(nm_unregister_value(&right_v));
        rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same dimensionality.");
      } else {
        // Dot product = sum of the element-wise product.
        VALUE result = elementwise_op(nm::EW_MUL, left_v, right_v);
        VALUE to_return = rb_funcall(result, rb_intern("sum"),0);
        NM_CONSERVATIVE(nm_unregister_value(&left_v));
        NM_CONSERVATIVE(nm_unregister_value(&right_v));
        return to_return;
      }
    }

    if (left->storage->shape[1] != right->storage->shape[0]) {
      NM_CONSERVATIVE(nm_unregister_value(&left_v));
      NM_CONSERVATIVE(nm_unregister_value(&right_v));
      rb_raise(rb_eArgError, "incompatible dimensions");
    }

    if (left->stype != right->stype) {
      NM_CONSERVATIVE(nm_unregister_value(&left_v));
      NM_CONSERVATIVE(nm_unregister_value(&right_v));
      rb_raise(rb_eNotImpError, "matrices must have same stype");
    }

    NM_CONSERVATIVE(nm_unregister_value(&left_v));
    NM_CONSERVATIVE(nm_unregister_value(&right_v));
    return matrix_multiply(left, right);

  }

  // Unreachable: every branch above either returns or raises.
  NM_CONSERVATIVE(nm_unregister_value(&left_v));
  NM_CONSERVATIVE(nm_unregister_value(&right_v));

  return Qnil;
}


/*
 * call-seq:
 *     dim -> Integer
 *
 * Get the number of dimensions of a matrix.
 *
 * In other words, if you set your matrix to be 3x4, the dim is 2. If the
 * matrix was initialized as 3x4x3, the dim is 3.
 *
 * Use #effective_dim to get the dimension of an NMatrix which acts as a vector (e.g., a column or row).
 */
static VALUE nm_dim(VALUE self) {
  return INT2FIX(NM_STORAGE(self)->dim);
}

/*
 * call-seq:
 *     shape -> Array
 *
 * Get the shape (dimensions) of a matrix.
 */
static VALUE nm_shape(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  STORAGE* s   = NM_STORAGE(self);

  // Copy elements into a VALUE array and then use those to create a Ruby array with rb_ary_new4.
  VALUE* shape = NM_ALLOCA_N(VALUE, s->dim);
  nm_register_values(shape, s->dim);
  for (size_t index = 0; index < s->dim; ++index)
    shape[index] = INT2FIX(s->shape[index]);

  // NOTE(review): shape[] is unregistered before rb_ary_new4 copies it;
  // assumes no GC can run in between — confirm.
  nm_unregister_values(shape, s->dim);
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return rb_ary_new4(s->dim, shape);
}


/*
 * call-seq:
 *     offset -> Array
 *
 * Get the offset (slice position) of a matrix. Typically all zeros, unless you have a reference slice.
 */
static VALUE nm_offset(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  STORAGE* s   = NM_STORAGE(self);

  // Copy elements into a VALUE array and then use those to create a Ruby array with rb_ary_new4.
  VALUE* offset = NM_ALLOCA_N(VALUE, s->dim);
  nm_register_values(offset, s->dim);
  for (size_t index = 0; index < s->dim; ++index)
    offset[index] = INT2FIX(s->offset[index]);

  nm_unregister_values(offset, s->dim);
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return rb_ary_new4(s->dim, offset);
}


/*
 * call-seq:
 *     supershape -> Array
 *
 * Get the shape of a slice's parent.
 */
static VALUE nm_supershape(VALUE self) {

  STORAGE* s   = NM_STORAGE(self);
  if (s->src == s) {
    return nm_shape(self); // easy case (not a slice)
  }
  else s = s->src;

  NM_CONSERVATIVE(nm_register_value(&self));

  VALUE* shape = NM_ALLOCA_N(VALUE, s->dim);
  nm_register_values(shape, s->dim);
  for (size_t index = 0; index < s->dim; ++index)
    shape[index] = INT2FIX(s->shape[index]);

  nm_unregister_values(shape, s->dim);
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return rb_ary_new4(s->dim, shape);
}

/*
 * call-seq:
 *     stype -> Symbol
 *
 * Get the storage type (stype) of a matrix, e.g., :yale, :dense, or :list.
 */
static VALUE nm_stype(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  VALUE stype = ID2SYM(rb_intern(STYPE_NAMES[NM_STYPE(self)]));
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return stype;
}

/*
 * call-seq:
 *     symmetric? 
-> Boolean
 *
 * Is this matrix symmetric?
 */
static VALUE nm_symmetric(VALUE self) {
  return is_symmetric(self, false);
}


/*
 * Gets the dimension of a matrix which might be a vector (have one or more shape components of size 1).
 */
static size_t effective_dim(STORAGE* s) {
  size_t d = 0;
  for (size_t i = 0; i < s->dim; ++i) {
    if (s->shape[i] != 1) d++;
  }
  return d;
}


/*
 * call-seq:
 *     effective_dim -> Fixnum
 *
 * Returns the number of dimensions that don't have length 1. Guaranteed to be less than or equal to #dim.
 */
static VALUE nm_effective_dim(VALUE self) {
  return INT2FIX(effective_dim(NM_STORAGE(self)));
}


/*
 * Get a slice of an NMatrix.
 */
static VALUE nm_xslice(int argc, VALUE* argv, void* (*slice_func)(const STORAGE*, SLICE*), void (*delete_func)(NMATRIX*), VALUE self) {
  VALUE result = Qnil;

  STORAGE* s = NM_STORAGE(self);

  if (NM_DIM(self) < (size_t)(argc)) {
    rb_raise(rb_eArgError, "wrong number of arguments (%d for %lu)", argc, effective_dim(s));
  } else {

    NM_CONSERVATIVE(nm_register_values(argv, argc));
    NM_CONSERVATIVE(nm_register_value(&self));

    nm_register_value(&result);

    SLICE slice_s;
    SLICE* slice = &slice_s;
    size_t dim = NM_DIM(self);
    slice->coords = NM_ALLOCA_N(size_t, dim);
    slice->lengths = NM_ALLOCA_N(size_t, dim);
    init_slice_no_alloc(slice, dim, argc, argv, s->shape);

    if (slice->single) {
      // Single-element slice: return the element itself rather than a new NMatrix.
      static void* (*ttable[nm::NUM_STYPES])(const STORAGE*, SLICE*) = {
        nm_dense_storage_ref,
        nm_list_storage_ref,
        nm_yale_storage_ref
      };

      if (NM_DTYPE(self) == nm::RUBYOBJ)  result = *reinterpret_cast<VALUE*>( ttable[NM_STYPE(self)](s, slice) );
      else                                result = nm::rubyobj_from_cval( ttable[NM_STYPE(self)](s, slice), NM_DTYPE(self) ).rval;

    } else {

      // Multi-element slice: wrap the sliced storage in a new NMatrix of the same stype.
      NMATRIX* mat  = NM_ALLOC(NMATRIX);
      mat->stype    = NM_STYPE(self);
      mat->storage  = (STORAGE*)((*slice_func)( s, slice ));
      nm_register_nmatrix(mat);
      result        = Data_Wrap_Struct(CLASS_OF(self), nm_mark, delete_func, mat);
      nm_unregister_nmatrix(mat);
    }
  }

  nm_unregister_value(&result);
  NM_CONSERVATIVE(nm_unregister_values(argv, argc));
  NM_CONSERVATIVE(nm_unregister_value(&self));

  return result;
}

//////////////////////
// Helper Functions //
//////////////////////

static VALUE unary_op(nm::unaryop_t op, VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  NMATRIX* left;
  UnwrapNMatrix(self, left);
  std::string sym;

  // Build the stype-specific helper name, e.g. "__dense_unary_<op>__".
  switch(left->stype) {
  case nm::DENSE_STORE:
    sym = "__dense_unary_" + nm::UNARYOPS[op] + "__";
    break;
  case nm::YALE_STORE:
    sym = "__yale_unary_" + nm::UNARYOPS[op]  + "__";
    break;
  case nm::LIST_STORE:
    sym = "__list_unary_" + nm::UNARYOPS[op]  + "__";
    break;
  }

  NM_CONSERVATIVE(nm_unregister_value(&self));
  return rb_funcall(self, rb_intern(sym.c_str()), 0);
}

static void check_dims_and_shape(VALUE left_val, VALUE right_val) {
    // Check that the left- and right-hand sides have the same dimensionality.
    if (NM_DIM(left_val) != NM_DIM(right_val)) {
      rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same dimensionality.");
    }
    // Check that the left- and right-hand sides have the same shape.
    if (memcmp(&NM_SHAPE(left_val, 0), &NM_SHAPE(right_val, 0), sizeof(size_t) * NM_DIM(left_val)) != 0) {
      rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same shape.");
    }
}

static VALUE elementwise_op(nm::ewop_t op, VALUE left_val, VALUE right_val) {

  NM_CONSERVATIVE(nm_register_value(&left_val));
  NM_CONSERVATIVE(nm_register_value(&right_val));

  NMATRIX* left;
  NMATRIX* result;

  CheckNMatrixType(left_val);
  UnwrapNMatrix(left_val, left);

  if 
(!IsNMatrixType(right_val)) {
    // This is a matrix-scalar element-wise operation.
    // Dispatch to the stype-specific __*_scalar_<op>__ Ruby method.
    std::string sym;
    switch(left->stype) {
    case nm::DENSE_STORE:
      sym = "__dense_scalar_" + nm::EWOP_NAMES[op] + "__";
      break;
    case nm::YALE_STORE:
      sym = "__yale_scalar_" + nm::EWOP_NAMES[op] + "__";
      break;
    case nm::LIST_STORE:
      sym = "__list_scalar_" + nm::EWOP_NAMES[op] + "__";
      break;
    default:
      NM_CONSERVATIVE(nm_unregister_value(&left_val));
      NM_CONSERVATIVE(nm_unregister_value(&right_val));
      rb_raise(rb_eNotImpError, "unknown storage type requested scalar element-wise operation");
    }
    VALUE symv = rb_intern(sym.c_str());
    NM_CONSERVATIVE(nm_unregister_value(&left_val));
    NM_CONSERVATIVE(nm_unregister_value(&right_val));
    return rb_funcall(left_val, symv, 1, right_val);

  } else {

    check_dims_and_shape(left_val, right_val);

    NMATRIX* right;
    UnwrapNMatrix(right_val, right);

    if (left->stype == right->stype) {
      std::string sym;

      switch(left->stype) {
      case nm::DENSE_STORE:
        sym = "__dense_elementwise_" + nm::EWOP_NAMES[op] + "__";
        break;
      case nm::YALE_STORE:
        sym = "__yale_elementwise_" + nm::EWOP_NAMES[op] + "__";
        break;
      case nm::LIST_STORE:
        sym = "__list_elementwise_" + nm::EWOP_NAMES[op] + "__";
        break;
      default:
        NM_CONSERVATIVE(nm_unregister_value(&left_val));
        NM_CONSERVATIVE(nm_unregister_value(&right_val));
        rb_raise(rb_eNotImpError, "unknown storage type requested element-wise operation");
      }

      VALUE symv = rb_intern(sym.c_str());
      NM_CONSERVATIVE(nm_unregister_value(&left_val));
      NM_CONSERVATIVE(nm_unregister_value(&right_val));
      return rb_funcall(left_val, symv, 1, right_val);

    } else {
      NM_CONSERVATIVE(nm_unregister_value(&left_val));
      NM_CONSERVATIVE(nm_unregister_value(&right_val));
      rb_raise(rb_eArgError, "Element-wise operations are not currently supported between matrices with differing stypes.");
    }
  }

  // Unreachable: every path above returns or raises (result is never assigned).
  NM_CONSERVATIVE(nm_unregister_value(&left_val));
  NM_CONSERVATIVE(nm_unregister_value(&right_val));
  return Data_Wrap_Struct(CLASS_OF(left_val), nm_mark, nm_delete, result);
}

static VALUE noncom_elementwise_op(nm::noncom_ewop_t op, VALUE self, VALUE other, VALUE flip) {

  NM_CONSERVATIVE(nm_register_value(&self));
  NM_CONSERVATIVE(nm_register_value(&other));

  NMATRIX* self_nm;
  NMATRIX* result;

  CheckNMatrixType(self);
  UnwrapNMatrix(self, self_nm);

  if (!IsNMatrixType(other)) {
    // This is a matrix-scalar element-wise operation.
    std::string sym;
    switch(self_nm->stype) {
    case nm::DENSE_STORE:
      sym = "__dense_scalar_" + nm::NONCOM_EWOP_NAMES[op] + "__";
      break;
    case nm::YALE_STORE:
      sym = "__yale_scalar_" + nm::NONCOM_EWOP_NAMES[op] + "__";
      break;
    case nm::LIST_STORE:
      sym = "__list_scalar_" + nm::NONCOM_EWOP_NAMES[op] + "__";
      break;
    default:
      NM_CONSERVATIVE(nm_unregister_value(&self));
      NM_CONSERVATIVE(nm_unregister_value(&other));
      rb_raise(rb_eNotImpError, "unknown storage type requested scalar element-wise operation");
    }
    NM_CONSERVATIVE(nm_unregister_value(&self));
    NM_CONSERVATIVE(nm_unregister_value(&other));
    return rb_funcall(self, rb_intern(sym.c_str()), 2, other, flip);

  } else {

    check_dims_and_shape(self, other);

    NMATRIX* other_nm;
    UnwrapNMatrix(other, other_nm);

    if (self_nm->stype == other_nm->stype) {
      std::string sym;

      switch(self_nm->stype) {
      case nm::DENSE_STORE:
        sym = "__dense_elementwise_" + nm::NONCOM_EWOP_NAMES[op] + "__";
        break;
      case nm::YALE_STORE:
        sym = "__yale_elementwise_" + nm::NONCOM_EWOP_NAMES[op] + 
\"__\";\n        break;\n      case nm::LIST_STORE:\n        sym = \"__list_elementwise_\" + nm::NONCOM_EWOP_NAMES[op] + \"__\";\n        break;\n      default:\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  NM_CONSERVATIVE(nm_unregister_value(&other));\n  rb_raise(rb_eNotImpError, \"unknown storage type requested element-wise operation\");\n      }\n      NM_CONSERVATIVE(nm_unregister_value(&self));\n      NM_CONSERVATIVE(nm_unregister_value(&other));\n      return rb_funcall(self, rb_intern(sym.c_str()), 2, other, flip);\n\n    } else {\n      nm_unregister_value(&self);\n      nm_unregister_value(&other);\n      rb_raise(rb_eArgError, \"Element-wise operations are not currently supported between matrices with differing stypes.\");\n    }\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  NM_CONSERVATIVE(nm_unregister_value(&other));\n  return Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, result);\n}\n\n/*\n * Check to determine whether matrix is a reference to another matrix.\n */\nbool is_ref(const NMATRIX* matrix) {\n  return matrix->storage->src != matrix->storage;\n}\n\n/*\n * Helper function for nm_symmetric and nm_hermitian.\n */\nstatic VALUE is_symmetric(VALUE self, bool hermitian) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  NMATRIX* m;\n  UnwrapNMatrix(self, m);\n  bool is_symmetric = false;\n\n  if (m->storage->shape[0] == m->storage->shape[1] and m->storage->dim == 2) {\n    if (NM_STYPE(self) == nm::DENSE_STORE) {\n      if (hermitian) {\n        is_symmetric = nm_dense_storage_is_hermitian((DENSE_STORAGE*)(m->storage), m->storage->shape[0]);\n\n      } else {\n        is_symmetric = nm_dense_storage_is_symmetric((DENSE_STORAGE*)(m->storage), m->storage->shape[0]);\n      }\n\n    } else {\n      // TODO: Implement, at the very least, yale_is_symmetric. Model it after yale/transp.template.c.\n      NM_CONSERVATIVE(nm_unregister_value(&self));\n      rb_raise(rb_eNotImpError, \"symmetric? and hermitian? 
only implemented for dense currently\");\n    }\n\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  return is_symmetric ? Qtrue : Qfalse;\n}\n\n///////////////////////\n// Utility Functions //\n///////////////////////\n\n/*\n * Guess the dtype given a Ruby VALUE and return it as a symbol.\n *\n * Not to be confused with nm_dtype_guess, which returns an nm::dtype_t. (This calls that.)\n */\nstatic VALUE nm_guess_dtype(VALUE self, VALUE v) {\n  return ID2SYM(rb_intern(DTYPE_NAMES[nm_dtype_guess(v)]));\n}\n\n/*\n * Get the minimum allowable dtype for a Ruby VALUE and return it as a symbol.\n */\nstatic VALUE nm_min_dtype(VALUE self, VALUE v) {\n  return ID2SYM(rb_intern(DTYPE_NAMES[nm_dtype_min(v)]));\n}\n\n/*\n * Helper for nm_dtype_min(), handling integers.\n */\nnm::dtype_t nm_dtype_min_fixnum(int64_t v) {\n  if (v >= 0 && v <= UCHAR_MAX) return nm::BYTE;\n  else {\n    v = std::abs(v);\n    if (v <= CHAR_MAX) return nm::INT8;\n    else if (v <= SHRT_MAX) return nm::INT16;\n    else if (v <= INT_MAX) return nm::INT32;\n    else return nm::INT64;\n  }\n}\n\n/*\n * Return the minimum dtype required to store a given value.\n *\n * This is kind of arbitrary. For Float, it always returns :float32 for example, since in some cases neither :float64\n * not :float32 are sufficient.\n *\n * This function is used in upcasting for scalar math. We want to ensure that :int8 + 1 does not return an :int64, basically.\n *\n * FIXME: Eventually, this function should actually look at the value stored in Fixnums (for example), so that it knows\n * whether to return :int64 or :int32.\n */\nnm::dtype_t nm_dtype_min(VALUE v) {\n\n  if (RB_TYPE_P(v, T_FIXNUM))\n    return nm_dtype_min_fixnum(FIX2LONG(v));\n  else if (RB_TYPE_P(v, T_BIGNUM))\n    return nm::INT64;\n  else if (RB_TYPE_P(v, T_FLOAT))\n    return nm::FLOAT32;\n  else if (RB_TYPE_P(v, T_COMPLEX))\n    return nm::COMPLEX64;\n  else if (RB_TYPE_P(v, T_STRING))\n    return RSTRING_LEN(v) == 1 ? 
nm::BYTE : nm::RUBYOBJ;\n  else if (RB_TYPE_P(v, T_TRUE) || RB_TYPE_P(v, T_FALSE) || RB_TYPE_P(v, T_NIL))\n    return nm::RUBYOBJ;\n  else\n    return nm::RUBYOBJ;\n}\n\n\n/*\n * Guess the data type given a value.\n *\n * TODO: Probably needs some work for Bignum.\n */\nnm::dtype_t nm_dtype_guess(VALUE v) {\n  if (RB_TYPE_P(v, T_TRUE) || RB_TYPE_P(v, T_FALSE) || RB_TYPE_P(v, T_NIL))\n    return nm::RUBYOBJ;\n  else if (RB_TYPE_P(v, T_STRING))\n    return RSTRING_LEN(v) == 1 ? nm::BYTE : nm::RUBYOBJ;\n  else if (RB_TYPE_P(v, T_FIXNUM))\n#if SIZEOF_INT == 8\n    return nm::INT64;\n#elif SIZEOF_INT == 4\n    return nm::INT32;\n#else\n    return nm::INT16;\n#endif\n  else if (RB_TYPE_P(v, T_BIGNUM))\n    return nm::INT64;\n#if SIZEOF_FLOAT == 4\n  else if (RB_TYPE_P(v, T_COMPLEX))\n    return nm::COMPLEX128;\n  else if (RB_TYPE_P(v, T_FLOAT))\n    return nm::FLOAT64;\n#elif SIZEOF_FLOAT == 2\n  else if (RB_TYPE_P(v, T_COMPLEX))\n    return nm::COMPLEX64;\n  else if (RB_TYPE_P(v, T_FLOAT))\n    return nm::FLOAT32;\n#endif\n  else if (RB_TYPE_P(v, T_ARRAY))\n    /*\n     * May be passed for dense -- for now, just look at the first element.\n     *\n     * TODO: Look at entire array for most specific type.\n     */\n    return nm_dtype_guess(RARRAY_AREF(v, 0));\n  else {\n    RB_P(v);\n    rb_raise(rb_eArgError, \"Unable to guess a data type from provided parameters; data type must be specified manually.\");\n  }\n}\n\n/*\n * Modify an existing SLICE object (with properly allocated memory),\n * so that it will contain the appropriate coordinate and length information\n * for accessing some part of a matrix.\n */\nstatic void init_slice_no_alloc(SLICE* slice, size_t dim, int argc, VALUE* arg, size_t* shape) {\n  NM_CONSERVATIVE(nm_register_values(arg, argc));\n\n  VALUE beg, end;\n  int excl;\n\n  slice->single = true;\n\n  // r is the shape position; t is the slice position. 
They may differ when we're dealing with a\n  // matrix where the effective dimension is less than the dimension (e.g., a vector).\n  for (size_t r = 0, t = 0; r < dim; ++r) {\n    VALUE v = t == (unsigned int)argc ? Qnil : arg[t];\n\n    // if the current shape indicates a vector and fewer args were supplied than necessary, just use 0\n    if (argc - t + r < dim && shape[r] == 1) {\n      slice->coords[r]  = 0;\n      slice->lengths[r] = 1;\n\n    } else if (FIXNUM_P(v)) { // this used CLASS_OF before, which is inefficient for fixnum\n      int v_ = FIX2INT(v);\n      if (v_ < 0) // checking for negative indexes\n        slice->coords[r]  = shape[r]+v_;\n      else\n        slice->coords[r]  = v_;\n      slice->lengths[r] = 1;\n      t++;\n\n    } else if (SYMBOL_P(v) && rb_to_id(v) == nm_rb_mul) { // :* means the whole possible range\n\n      slice->coords[r]  = 0;\n      slice->lengths[r] = shape[r];\n      slice->single     = false;\n      t++;\n\n    } else if (CLASS_OF(v) == rb_cRange) {\n      rb_range_values(arg[t], &beg, &end, &excl);\n\n      int begin_ = FIX2INT(beg);\n      int end_   = FIX2INT(end);\n\n      slice->coords[r] = (begin_ < 0) ? shape[r] + begin_ : begin_;\n\n      // Exclude last element for a...b range\n      if (end_ < 0)\n        slice->lengths[r] = shape[r] + end_ - slice->coords[r] + (excl ? 0 : 1);\n      else\n        slice->lengths[r] = end_ - slice->coords[r] + (excl ? 
0 : 1);\n\n      slice->single     = false;\n      t++;\n\n    } else {\n      NM_CONSERVATIVE(nm_unregister_values(arg, argc));\n      rb_raise(rb_eArgError, \"expected Fixnum or Range for slice component instead of %s\", rb_obj_classname(v));\n    }\n\n    if (slice->coords[r] > shape[r] || slice->coords[r] + slice->lengths[r] > shape[r]) {\n      NM_CONSERVATIVE(nm_unregister_values(arg, argc));\n      rb_raise(rb_eRangeError, \"slice is larger than matrix in dimension %lu (slice component %lu)\", r, t);\n    }\n  }\n\n  NM_CONSERVATIVE(nm_unregister_values(arg, argc));\n}\n\n#ifdef BENCHMARK\n/*\n * A simple function used when benchmarking NMatrix.\n */\nstatic double get_time(void) {\n  struct timeval t;\n  struct timezone tzp;\n\n  gettimeofday(&t, &tzp);\n\n  return t.tv_sec + t.tv_usec*1e-6;\n}\n#endif\n\n/*\n * The argv parameter will be either 1 or 2 elements.  If 1, could be either\n * initial or dtype.  If 2, is initial and dtype. This function returns the\n * dtype.\n */\nstatic nm::dtype_t interpret_dtype(int argc, VALUE* argv, nm::stype_t stype) {\n  int offset;\n\n  switch (argc) {\n    case 1:\n      offset = 0;\n      break;\n\n    case 2:\n      offset = 1;\n      break;\n\n    default:\n      rb_raise(rb_eArgError, \"Need an initial value or a dtype.\");\n      break;\n  }\n\n  if (SYMBOL_P(argv[offset])) {\n    return nm_dtype_from_rbsymbol(argv[offset]);\n\n  } else if (RB_TYPE_P(argv[offset], T_STRING)) {\n    return nm_dtype_from_rbstring(StringValue(argv[offset]));\n\n  } else if (stype == nm::YALE_STORE) {\n    rb_raise(rb_eArgError, \"Yale storage class requires a dtype.\");\n\n  } else {\n    return nm_dtype_guess(argv[0]);\n  }\n}\n\n/*\n * Convert an Ruby value or an array of Ruby values into initial C values.\n */\nstatic void* interpret_initial_value(VALUE arg, nm::dtype_t dtype) {\n  NM_CONSERVATIVE(nm_register_value(&arg));\n\n  unsigned int index;\n  void* init_val;\n\n  if (RB_TYPE_P(arg, T_ARRAY)) {\n    // Array\n    init_val = 
NM_ALLOC_N(char, DTYPE_SIZES[dtype] * RARRAY_LEN(arg));\n    NM_CHECK_ALLOC(init_val);\n    for (index = 0; index < RARRAY_LEN(arg); ++index) {\n      rubyval_to_cval(RARRAY_AREF(arg, index), dtype, (char*)init_val + (index * DTYPE_SIZES[dtype]));\n    }\n\n  } else {\n    // Single value\n    init_val = rubyobj_to_cval(arg, dtype);\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&arg));\n  return init_val;\n}\n\n/*\n * Convert the shape argument, which may be either a Ruby value or an array of\n * Ruby values, into C values.  The second argument is where the dimensionality\n * of the matrix will be stored.  The function itself returns a pointer to the\n * array describing the shape, which must be freed manually.\n */\nstatic size_t* interpret_shape(VALUE arg, size_t* dim) {\n  NM_CONSERVATIVE(nm_register_value(&arg));\n  size_t* shape;\n\n  if (RB_TYPE_P(arg, T_ARRAY)) {\n    *dim = RARRAY_LEN(arg);\n    shape = NM_ALLOC_N(size_t, *dim);\n\n    for (size_t index = 0; index < *dim; ++index) {\n      shape[index] = FIX2UINT( RARRAY_AREF(arg, index) );\n    }\n\n  } else if (FIXNUM_P(arg)) {\n    *dim = 2;\n    shape = NM_ALLOC_N(size_t, *dim);\n\n    shape[0] = FIX2UINT(arg);\n    shape[1] = FIX2UINT(arg);\n\n  } else {\n    nm_unregister_value(&arg);\n    rb_raise(rb_eArgError, \"Expected an array of numbers or a single Fixnum for matrix shape\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&arg));\n  return shape;\n}\n\n/*\n * Convert a Ruby symbol or string into an storage type.\n */\nstatic nm::stype_t interpret_stype(VALUE arg) {\n  if (SYMBOL_P(arg)) {\n    return nm_stype_from_rbsymbol(arg);\n\n  } else if (RB_TYPE_P(arg, T_STRING)) {\n    return nm_stype_from_rbstring(StringValue(arg));\n\n  } else {\n    rb_raise(rb_eArgError, \"Expected storage type\");\n  }\n}\n\n//////////////////\n// Math Helpers //\n//////////////////\n\nSTORAGE* matrix_storage_cast_alloc(NMATRIX* matrix, nm::dtype_t new_dtype) {\n  if (matrix->storage->dtype == new_dtype && 
!is_ref(matrix))\n    return matrix->storage;\n\n  CAST_TABLE(cast_copy_storage);\n  return cast_copy_storage[matrix->stype][matrix->stype](matrix->storage, new_dtype, NULL);\n}\n\nSTORAGE_PAIR binary_storage_cast_alloc(NMATRIX* left_matrix, NMATRIX* right_matrix) {\n  nm_register_nmatrix(left_matrix);\n  nm_register_nmatrix(right_matrix);\n\n  STORAGE_PAIR casted;\n  nm::dtype_t new_dtype = Upcast[left_matrix->storage->dtype][right_matrix->storage->dtype];\n\n  casted.left  = matrix_storage_cast_alloc(left_matrix, new_dtype);\n  nm_register_storage(left_matrix->stype, casted.left);\n  casted.right = matrix_storage_cast_alloc(right_matrix, new_dtype);\n\n  nm_unregister_nmatrix(left_matrix);\n  nm_unregister_nmatrix(right_matrix);\n  nm_unregister_storage(left_matrix->stype, casted.left);\n\n  return casted;\n}\n\nstatic VALUE matrix_multiply_scalar(NMATRIX* left, VALUE scalar) {\n  rb_raise(rb_eNotImpError, \"matrix-scalar multiplication not implemented yet\");\n  return Qnil;\n}\n\nstatic VALUE matrix_multiply(NMATRIX* left, NMATRIX* right) {\n  nm_register_nmatrix(left);\n  nm_register_nmatrix(right);\n  ///TODO: multiplication for non-dense and/or non-decimal matrices\n\n  // Make sure both of our matrices are of the correct type.\n  STORAGE_PAIR casted = binary_storage_cast_alloc(left, right);\n  nm_register_storage(left->stype, casted.left);\n  nm_register_storage(right->stype, casted.right);\n\n  size_t*  resulting_shape   = NM_ALLOC_N(size_t, 2);\n  resulting_shape[0] = left->storage->shape[0];\n  resulting_shape[1] = right->storage->shape[1];\n\n  // Sometimes we only need to use matrix-vector multiplication (e.g., GEMM versus GEMV). 
Find out.\n  bool vector = false;\n  if (resulting_shape[1] == 1) vector = true;\n\n  static STORAGE* (*storage_matrix_multiply[nm::NUM_STYPES])(const STORAGE_PAIR&, size_t*, bool) = {\n    nm_dense_storage_matrix_multiply,\n    nm_list_storage_matrix_multiply,\n    nm_yale_storage_matrix_multiply\n  };\n\n  STORAGE* resulting_storage = storage_matrix_multiply[left->stype](casted, resulting_shape, vector);\n  NMATRIX* result = nm_create(left->stype, resulting_storage);\n  nm_register_nmatrix(result);\n\n  // Free any casted-storage we created for the multiplication.\n  // TODO: Can we make the Ruby GC take care of this stuff now that we're using it?\n  // If we did that, we night not have to re-create these every time, right? Or wrong? Need to do\n  // more research.\n  static void (*free_storage[nm::NUM_STYPES])(STORAGE*) = {\n    nm_dense_storage_delete,\n    nm_list_storage_delete,\n    nm_yale_storage_delete\n  };\n\n  nm_unregister_storage(left->stype, casted.left);\n  if (left->storage != casted.left)   free_storage[result->stype](casted.left);\n\n  nm_unregister_storage(right->stype, casted.right);\n  if (right->storage != casted.right) free_storage[result->stype](casted.right);\n\n  VALUE to_return = result ? Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, result) : Qnil; // Only if we try to multiply list matrices should we return Qnil.\n\n  nm_unregister_nmatrix(left);\n  nm_unregister_nmatrix(right);\n  nm_unregister_nmatrix(result);\n\n  return to_return;\n}\n\n/*\n * Reduce a matrix to hessenberg form.\n *\n * == Arguments\n *\n * a - The NMatrix to be reduced. This matrix is replaced with the hessenberg form.\n *\n * == Notes \n *\n * LAPACK free.\n */\nstatic VALUE nm_hessenberg(VALUE self, VALUE a) {\n  nm_math_hessenberg(a);\n  \n  return a;\n}\n\n/*\n * Calculate the inverse of a matrix with in-place Gauss-Jordan elimination.\n * Inverse will fail if the largest element in any column in zero. 
\n *\n * LAPACK free.\n */\nstatic VALUE nm_inverse(VALUE self, VALUE inverse, VALUE bang) {\n\n  if (NM_STYPE(self) != nm::DENSE_STORE) {\n    rb_raise(rb_eNotImpError, \"needs exact determinant implementation for this matrix stype\");\n    return Qnil;\n  }\n\n  if (NM_DIM(self) != 2 || NM_SHAPE0(self) != NM_SHAPE1(self)) {\n    rb_raise(nm_eShapeError, \"matrices must be square to have an inverse defined\");\n    return Qnil;\n  }\n\n  if (bang == Qtrue) {\n    nm_math_inverse(NM_SHAPE0(self), NM_STORAGE_DENSE(self)->elements, \n      NM_DTYPE(self));\n          \n    return self;\n  }\n\n  nm_math_inverse(NM_SHAPE0(inverse), NM_STORAGE_DENSE(inverse)->elements, \n    NM_DTYPE(inverse));\n\n  return inverse;\n}\n\n/*\n * Calculate the exact inverse of a 2x2 or 3x3 matrix.\n *\n * Does not test for invertibility!\n */\nstatic VALUE nm_inverse_exact(VALUE self, VALUE inverse, VALUE lda, VALUE ldb) {\n  if (NM_DIM(self) != 2 || NM_SHAPE0(self) != NM_SHAPE1(self)) {\n    rb_raise(nm_eShapeError, \"matrices must be square to have an inverse defined\");\n    return Qnil;\n  }\n\n  nm::dtype_t dtype = NM_DTYPE(self);\n  void* result = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  if (dtype == nm::RUBYOBJ) {\n    nm_register_values(reinterpret_cast<VALUE*>(result), 1);\n  }\n  nm::stype_t old_stype = NM_STYPE(self);\n  if (old_stype == nm::LIST_STORE) {\n    self = nm_cast_with_types(self, nm::YALE_STORE, dtype, result);\n    inverse = nm_cast_with_types(inverse, nm::YALE_STORE, dtype, result);\n  }\n\n  if (NM_STYPE(self) == nm::DENSE_STORE) {\n    nm_math_inverse_exact_from_dense(NM_SHAPE0(self), \n      NM_STORAGE_DENSE(self)->elements, FIX2INT(lda), \n      NM_STORAGE_DENSE(inverse)->elements, FIX2INT(ldb), dtype);\n  } else {\n    nm_math_inverse_exact_from_yale(NM_SHAPE0(self), \n      NM_STORAGE_YALE(self), FIX2INT(lda), \n      NM_STORAGE_YALE(inverse), FIX2INT(ldb), dtype);\n  }\n\n  if (old_stype == nm::LIST_STORE) {\n    inverse = nm_cast_with_types(inverse, 
nm::LIST_STORE, dtype, result);\n  }\n  if (dtype == nm::RUBYOBJ) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(result), 1);\n  }\n  return inverse;\n}\n\n/*\n * Calculate the exact determinant of a dense matrix.\n *\n * Returns nil for dense matrices which are not square or number of dimensions other than 2.\n *\n * Note: Currently only implemented for 2x2 and 3x3 matrices.\n */\nstatic VALUE nm_det_exact(VALUE self) {\n\n  if (NM_DIM(self) != 2 || NM_SHAPE0(self) != NM_SHAPE1(self)) {\n    rb_raise(nm_eShapeError, \"matrices must be square to have a determinant defined\");\n    return Qnil;\n  }\n\n  nm::dtype_t dtype = NM_DTYPE(self);\n  void* result = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  if (NM_STYPE(self) == nm::LIST_STORE) {\n    self = nm_cast_with_types(self, nm::YALE_STORE, dtype, result);\n  }\n\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  // Calculate the determinant and then assign it to the return value\n  if (NM_STYPE(self) == nm::DENSE_STORE) {\n    nm_math_det_exact_from_dense(NM_SHAPE0(self), NM_STORAGE_DENSE(self)->elements, \n          NM_SHAPE0(self), NM_DTYPE(self), result);\n  } else {\n    nm_math_det_exact_from_yale(NM_SHAPE0(self), NM_STORAGE_YALE(self), \n          NM_SHAPE0(self), NM_DTYPE(self), result);\n  }\n\n  VALUE to_return;\n  if (dtype == nm::RUBYOBJ) {\n    to_return = *reinterpret_cast<VALUE*>(result);\n\n  } else {\n    to_return = nm::rubyobj_from_cval(result, NM_DTYPE(self)).rval;\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n\n  return to_return;\n}\n\n\n\n/*\n * Returns the pointer to the matrix storage's data. This is useful primarily when you are using FFI with NMatrix --\n * say, for example, you want to pass a float* to some function, and your NMatrix is a :float32 :dense matrix. 
Then you\n * can call this function and get that pointer directly instead of copying the data.\n */\nstatic VALUE nm_data_pointer(VALUE self) {\n  //if (NM_DTYPE(self) == nm::LIST_STORE)\n  //  rb_warn(\"pointer requested for list storage, which may be meaningless\");\n\n  // This is actually pretty easy, since all of the storage types have their elements positioned in the same place\n  // relative to one another. So yes, believe it or not, this should work just as well for Yale or list storage as for\n  // dense.\n  return INT2FIX(NM_STORAGE_DENSE(self)->elements);\n}\n\n\n/////////////////\n// Exposed API //\n/////////////////\n\n/*\n * Create a dense matrix. Used by the NMatrix GSL fork. Unlike nm_create, this one copies all of the\n * arrays and such passed in -- so you don't have to allocate and pass a new shape object for every\n * matrix you want to create, for example. Same goes for elements.\n *\n * Returns a properly-wrapped Ruby object as a VALUE.\n *\n * *** Note that this function is for API only. Please do not use it internally.\n *\n * TODO: Add a column-major option for libraries that use column-major matrices.\n */\nVALUE rb_nmatrix_dense_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* elements, size_t length) {\n\n  if (dtype == nm::RUBYOBJ) {\n    nm_register_values(reinterpret_cast<VALUE*>(elements), length);\n  }\n\n  NMATRIX* nm;\n  size_t nm_dim;\n  size_t* shape_copy;\n\n  // Do not allow a dim of 1. 
Treat it as a column or row matrix.\n  if (dim == 1) {\n    nm_dim        = 2;\n    shape_copy    = NM_ALLOC_N(size_t, nm_dim);\n    shape_copy[0]  = shape[0];\n    shape_copy[1]  = 1;\n\n  } else {\n    nm_dim      = dim;\n    shape_copy  = NM_ALLOC_N(size_t, nm_dim);\n    memcpy(shape_copy, shape, sizeof(size_t)*nm_dim);\n  }\n\n  // Copy elements\n  void* elements_copy = NM_ALLOC_N(char, DTYPE_SIZES[dtype]*length);\n  memcpy(elements_copy, elements, DTYPE_SIZES[dtype]*length);\n\n  // allocate and create the matrix and its storage\n  nm = nm_create(nm::DENSE_STORE, nm_dense_storage_create(dtype, shape_copy, dim, elements_copy, length));\n\n  nm_register_nmatrix(nm);\n\n  VALUE to_return = Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, nm);\n\n  nm_unregister_nmatrix(nm);\n  if (dtype == nm::RUBYOBJ) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(elements), length);\n  }\n\n  // tell Ruby about the matrix and its storage, particularly how to garbage collect it.\n  return to_return;\n}\n\n/*\n * Create a dense vector. Used by the NMatrix GSL fork.\n *\n * Basically just a convenience wrapper for rb_nmatrix_dense_create().\n *\n * Returns a properly-wrapped Ruby NMatrix object as a VALUE. Included for backwards compatibility\n * for when NMatrix had an NVector class.\n */\nVALUE rb_nvector_dense_create(nm::dtype_t dtype, void* elements, size_t length) {\n  size_t dim = 1, shape = length;\n  return rb_nmatrix_dense_create(dtype, &shape, dim, elements, length);\n}\n"
  },
  {
    "path": "ext/nmatrix/storage/common.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == common.cpp\n//\n// Code for the STORAGE struct that is common to all storage types.\n\n/*\n * Standard Includes\n */\n\n/*\n * Project Includes\n */\n\n#include \"common.h\"\n\n/*\n * Macros\n */\n\n/*\n * Global Variables\n */\n\n/*\n * Forward Declarations\n */\n\n/*\n * Functions\n */\n\nextern \"C\" {\n  /*\n   * Calculate the number of elements in the dense storage structure, based on\n   * shape and dim.\n   */\n  size_t nm_storage_count_max_elements(const STORAGE* storage) {\n    unsigned int i;\n    size_t count = 1;\n\n    for (i = storage->dim; i-- > 0;) {\n      count *= storage->shape[i];\n    }\n\n    return count;\n  }\n\n  // Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the length of\n  // the matrix's storage.\n  VALUE nm_enumerator_length(VALUE nmatrix) {\n    long len = nm_storage_count_max_elements(NM_STORAGE_DENSE(nmatrix));\n    return LONG2NUM(len);\n  }\n\n} // end of extern \"C\" block\n"
  },
  {
    "path": "ext/nmatrix/storage/common.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == common.h\n//\n// Header file for code common to all storage types.\n\n#ifndef STORAGE_COMMON_H\n#define STORAGE_COMMON_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <cmath> // pow().\n#include <type_traits>\n\n/*\n * Project Includes\n */\n#include \"data/data.h\"\n#include \"nmatrix.h\"\n\n/*\n * Macros\n */\n\n#define u_int8_t static_assert(false, \"Please use uint8_t for cross-platform support and consistency.\"); uint8_t\n#define u_int16_t static_assert(false, \"Please use uint16_t for cross-platform support and consistency.\"); uint16_t\n#define u_int32_t static_assert(false, \"Please use uint32_t for cross-platform support and consistency.\"); uint32_t\n#define u_int64_t static_assert(false, \"Please use uint64_t for cross-platform support and consistency.\"); uint64_t\n\nextern \"C\" {\n\n/*\n * Types\n */\n\n// For binary operations involving matrices that need to be casted.\nstruct STORAGE_PAIR {\n  STORAGE* left;\n  STORAGE* right;\n};\n\nstruct SLICE {\n  size_t*  coords; // Coordinate of first element\n  size_t*  lengths; // Lengths of slice\n  bool    single; // true if all lengths equal to 1 (represents single matrix element)\n};\n\n/*\n * Data\n */\n\n/*\n * 
Functions\n */\n\n  size_t nm_storage_count_max_elements(const STORAGE* storage);\n  VALUE nm_enumerator_length(VALUE nmatrix);\n\n} // end of extern \"C\" block\n\nnamespace nm {\n\n  /*\n   * Templated helper function for element-wise operations, used by dense, yale, and list.\n   */\n  template <ewop_t op, typename LDType, typename RDType>\n  inline VALUE ew_op_switch(LDType left, RDType right) {\n    switch (op) {\n      case EW_ADD:\n        return RubyObject(left + right).rval;\n\n      case EW_SUB:\n        return RubyObject(left - right).rval;\n\n      case EW_MUL:\n        return RubyObject(left * right).rval;\n\n      case EW_DIV:\n        return RubyObject(left / right).rval;\n\n      case EW_POW:\n        return RubyObject(pow(left, right)).rval;\n\n      case EW_MOD:\n        rb_raise(rb_eNotImpError, \"Element-wise modulo is currently not supported.\");\n        break;\n\n      default:\n        rb_raise(rb_eStandardError, \"This should not happen.\");\n    }\n    return Qnil;\n  }\n\n  #define EWOP_INT_INT_DIV(ltype, rtype)       template <>       \\\n  inline VALUE ew_op_switch<EW_DIV>( ltype left, rtype right) { \\\n    if (right == 0) rb_raise(rb_eZeroDivError, \"cannot divide type by 0, would throw SIGFPE\");  \\\n    if ((left > 0 && right > 0) || (left < 0 && right < 0)) \\\n      return left / right;  \\\n    else \\\n      return ( ltype )(std::floor((double)(left) / (double)(right)));  \\\n  }\n\n  #define EWOP_UINT_UINT_DIV(ltype, rtype)       template <>       \\\n  inline VALUE ew_op_switch<EW_DIV>( ltype left, rtype right) { \\\n    if (right == 0) rb_raise(rb_eZeroDivError, \"cannot divide type by 0, would throw SIGFPE\");  \\\n    return left / right;  \\\n  }\n\n  #define EWOP_INT_UINT_DIV(ltype, rtype)       template <>       \\\n  inline VALUE ew_op_switch<EW_DIV>( ltype left, rtype right) { \\\n    if (right == 0) rb_raise(rb_eZeroDivError, \"cannot divide type by 0, would throw SIGFPE\");  \\\n    if (left > 0 )  return left / 
right;  \\\n    else            return ( ltype )(std::floor((double)(left) / (double)(right)));  \\\n  }\n\n  #define EWOP_UINT_INT_DIV(ltype, rtype)       template <>       \\\n  inline VALUE ew_op_switch<EW_DIV>( ltype left, rtype right) { \\\n    if (right == 0) rb_raise(rb_eZeroDivError, \"cannot divide type by 0, would throw SIGFPE\");  \\\n    if (right > 0)  return left / right;  \\\n    else            return ( ltype )(std::floor((double)(left) / (double)(right)));  \\\n  }\n\n  #define EWOP_FLOAT_INT_DIV(ltype, rtype)       template <>       \\\n  inline VALUE ew_op_switch<EW_DIV>( ltype left, rtype right) { \\\n    return left / (ltype)(right);  \\\n  }\n\n  // Ensure that divisions are done in the Ruby way, and that (int)x/0 always raises a Ruby error instead\n  // of throwing a SIGFPE.\n  EWOP_INT_INT_DIV(int64_t, int64_t)\n  EWOP_INT_INT_DIV(int32_t, int32_t)\n  EWOP_INT_INT_DIV(int32_t, int64_t)\n  EWOP_INT_INT_DIV(int16_t, int16_t)\n  EWOP_INT_INT_DIV(int16_t, int32_t)\n  EWOP_INT_INT_DIV(int16_t, int64_t)\n  EWOP_INT_INT_DIV(int8_t, int8_t)\n  EWOP_INT_UINT_DIV(int8_t, uint8_t)\n  EWOP_INT_INT_DIV(int8_t, int16_t)\n  EWOP_INT_INT_DIV(int8_t, int32_t)\n  EWOP_INT_INT_DIV(int8_t, int64_t)\n  EWOP_UINT_UINT_DIV(uint8_t, uint8_t)\n  EWOP_UINT_INT_DIV(uint8_t, int8_t)\n  EWOP_UINT_INT_DIV(uint8_t, int16_t)\n  EWOP_UINT_INT_DIV(uint8_t, int32_t)\n  EWOP_UINT_INT_DIV(uint8_t, int64_t)\n  EWOP_FLOAT_INT_DIV(float, int8_t)\n  EWOP_FLOAT_INT_DIV(float, uint8_t)\n  EWOP_FLOAT_INT_DIV(float, int16_t)\n  EWOP_FLOAT_INT_DIV(float, int32_t)\n  EWOP_FLOAT_INT_DIV(float, int64_t)\n  EWOP_FLOAT_INT_DIV(double, int8_t)\n  EWOP_FLOAT_INT_DIV(double, uint8_t)\n  EWOP_FLOAT_INT_DIV(double, int16_t)\n  EWOP_FLOAT_INT_DIV(double, int32_t)\n  EWOP_FLOAT_INT_DIV(double, int64_t)\n\n}\n\n#endif // STORAGE_COMMON_H\n"
  },
  {
    "path": "ext/nmatrix/storage/dense/dense.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == dense.c\n//\n// Dense n-dimensional matrix storage.\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n\n/*\n * Project Includes\n */\n#include \"../../data/data.h\"\n#include \"../../math/long_dtype.h\"\n#include \"../../math/gemm.h\"\n#include \"../../math/gemv.h\"\n#include \"../../math/math.h\"\n#include \"../common.h\"\n#include \"dense.h\"\n\n/*\n * Macros\n */\n\n/*\n * Global Variables\n */\n\n/*\n * Forward Declarations\n */\n\nnamespace nm { namespace dense_storage {\n\n  template<typename LDType, typename RDType>\n  void ref_slice_copy_transposed(const DENSE_STORAGE* rhs, DENSE_STORAGE* lhs);\n\n  template <typename LDType, typename RDType>\n  DENSE_STORAGE* cast_copy(const DENSE_STORAGE* rhs, nm::dtype_t new_dtype);\n\n  template <typename LDType, typename RDType>\n  bool eqeq(const DENSE_STORAGE* left, const DENSE_STORAGE* right);\n\n  template <typename DType>\n  static DENSE_STORAGE* matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);\n\n  template <typename DType>\n  bool is_hermitian(const DENSE_STORAGE* mat, int lda);\n\n  template <typename DType>\n  bool is_symmetric(const DENSE_STORAGE* mat, int lda);\n\n\n  /*\n   * Recursive 
slicing for N-dimensional matrix.\n   */\n  template <typename LDType, typename RDType>\n  static void slice_copy(DENSE_STORAGE *dest, const DENSE_STORAGE *src, size_t* lengths, size_t pdest, size_t psrc, size_t n) {\n    if (src->dim - n > 1) {\n      for (size_t i = 0; i < lengths[n]; ++i) {\n        slice_copy<LDType,RDType>(dest, src, lengths,\n                   pdest + dest->stride[n]*i,\n                   psrc + src->stride[n]*i,\n                   n + 1);\n      }\n    } else {\n      for (size_t p = 0; p < dest->shape[n]; ++p) {\n        reinterpret_cast<LDType*>(dest->elements)[p+pdest] = reinterpret_cast<RDType*>(src->elements)[p+psrc];\n      }\n      /*memcpy((char*)dest->elements + pdest*DTYPE_SIZES[dest->dtype],\n          (char*)src->elements + psrc*DTYPE_SIZES[src->dtype],\n          dest->shape[n]*DTYPE_SIZES[dest->dtype]); */\n    }\n\n  }\n\n  /*\n   * Recursive function, sets multiple values in a matrix from a single source value. Same basic pattern as slice_copy.\n   */\n  template <typename D>\n  static void slice_set(DENSE_STORAGE* dest, size_t* lengths, size_t pdest, size_t rank, D* const v, size_t v_size, size_t& v_offset) {\n    if (dest->dim - rank > 1) {\n      for (size_t i = 0; i < lengths[rank]; ++i) {\n        slice_set<D>(dest, lengths, pdest + dest->stride[rank] * i, rank + 1, v, v_size, v_offset);\n      }\n    } else {\n      for (size_t p = 0; p < lengths[rank]; ++p, ++v_offset) {\n        if (v_offset >= v_size) v_offset %= v_size;\n\n        D* elem = reinterpret_cast<D*>(dest->elements);\n        elem[p + pdest] = v[v_offset];\n      }\n    }\n  }\n\n\n  /*\n   * Dense storage set/slice-set function, templated version.\n   */\n  template <typename D>\n  void set(VALUE left, SLICE* slice, VALUE right) {\n    NM_CONSERVATIVE(nm_register_value(&left));\n    NM_CONSERVATIVE(nm_register_value(&right));\n\n    DENSE_STORAGE* s = NM_STORAGE_DENSE(left);\n\n    std::pair<NMATRIX*,bool> nm_and_free =\n      
interpret_arg_as_dense_nmatrix(right, s->dtype);\n\n    // Map the data onto D* v.\n    D*     v;\n    size_t v_size = 1;\n\n    if (nm_and_free.first) {\n      DENSE_STORAGE* t = reinterpret_cast<DENSE_STORAGE*>(nm_and_free.first->storage);\n      v                = reinterpret_cast<D*>(t->elements);\n      v_size           = nm_storage_count_max_elements(t);\n\n    } else if (RB_TYPE_P(right, T_ARRAY)) {\n      \n      v_size = RARRAY_LEN(right);\n      v      = NM_ALLOC_N(D, v_size);\n      if (s->dtype == nm::RUBYOBJ)\n        nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n\n      for (size_t m = 0; m < v_size; ++m) {\n        rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));\n      }\n\n    } else {\n      v = reinterpret_cast<D*>(rubyobj_to_cval(right, NM_DTYPE(left)));\n      if (s->dtype == nm::RUBYOBJ)\n        nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n    }\n\n    if (slice->single) {\n      reinterpret_cast<D*>(s->elements)[nm_dense_storage_pos(s, slice->coords)] = *v;\n    } else {\n      size_t v_offset = 0;\n      slice_set(s, slice->lengths, nm_dense_storage_pos(s, slice->coords), 0, v, v_size, v_offset);\n    }\n\n    // Only free v if it was allocated in this function.\n    if (nm_and_free.first) {\n      if (nm_and_free.second) {\n        nm_delete(nm_and_free.first);\n      }\n    } else {\n      if (s->dtype == nm::RUBYOBJ)\n        nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);\n      NM_FREE(v);\n    }\n    NM_CONSERVATIVE(nm_unregister_value(&left));\n    NM_CONSERVATIVE(nm_unregister_value(&right));\n\n  }\n\n}} // end of namespace nm::dense_storage\n\n\nextern \"C\" {\n\nstatic size_t* stride(size_t* shape, size_t dim);\nstatic void slice_copy(DENSE_STORAGE *dest, const DENSE_STORAGE *src, size_t* lengths, size_t pdest, size_t psrc, size_t n);\n\n/*\n * Functions\n */\n\n///////////////\n// Lifecycle //\n///////////////\n\n\n/*\n * This creates a dummy with all the properties of dense 
storage, but no actual elements allocation.\n *\n * elements will be NULL when this function finishes. You can clean up with nm_dense_storage_delete, which will\n * check for that NULL pointer before freeing elements.\n */\nstatic DENSE_STORAGE* nm_dense_storage_create_dummy(nm::dtype_t dtype, size_t* shape, size_t dim) {\n  DENSE_STORAGE* s = NM_ALLOC( DENSE_STORAGE );\n\n  s->dim        = dim;\n  s->shape      = shape;\n  s->dtype      = dtype;\n\n  s->offset     = NM_ALLOC_N(size_t, dim);\n  memset(s->offset, 0, sizeof(size_t)*dim);\n\n  s->stride     = stride(shape, dim);\n  s->count      = 1;\n  s->src        = s;\n\n  s->elements   = NULL;\n\n  return s;\n}\n\n\n/*\n * Note that elements and elements_length are for initial value(s) passed in.\n * If they are the correct length, they will be used directly. If not, they\n * will be concatenated over and over again into a new elements array. If\n * elements is NULL, the new elements array will not be initialized.\n */\nDENSE_STORAGE* nm_dense_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* elements, size_t elements_length) {\n  if (dtype == nm::RUBYOBJ)\n    nm_register_values(reinterpret_cast<VALUE*>(elements), elements_length);\n\n  DENSE_STORAGE* s = nm_dense_storage_create_dummy(dtype, shape, dim);\n  size_t count  = nm_storage_count_max_elements(s);\n\n  if (elements_length == count) {\n    s->elements = elements;\n    \n    if (dtype == nm::RUBYOBJ)\n      nm_unregister_values(reinterpret_cast<VALUE*>(elements), elements_length);\n\n  } else {\n\n    s->elements = NM_ALLOC_N(char, DTYPE_SIZES[dtype]*count);\n\n    if (dtype == nm::RUBYOBJ)\n      nm_unregister_values(reinterpret_cast<VALUE*>(elements), elements_length);\n\n    size_t copy_length = elements_length;\n\n    if (elements_length > 0) {\n      // Repeat elements over and over again until the end of the matrix.\n      for (size_t i = 0; i < count; i += elements_length) {\n\n        if (i + elements_length > count) {\n          
copy_length = count - i;\n        }\n\n        memcpy((char*)(s->elements)+i*DTYPE_SIZES[dtype], (char*)(elements)+(i % elements_length)*DTYPE_SIZES[dtype], copy_length*DTYPE_SIZES[dtype]);\n      }\n\n      // Get rid of the init_val.\n      NM_FREE(elements);\n    }\n  }\n\n  return s;\n}\n\n\n/*\n * Destructor for dense storage. Make sure when you update this you also update nm_dense_storage_delete_dummy.\n */\nvoid nm_dense_storage_delete(STORAGE* s) {\n  // Sometimes Ruby passes in NULL storage for some reason (probably on copy construction failure).\n  if (s) {\n    DENSE_STORAGE* storage = (DENSE_STORAGE*)s;\n    if(storage->count-- == 1) {\n      NM_FREE(storage->shape);\n      NM_FREE(storage->offset);\n      NM_FREE(storage->stride);\n      if (storage->elements != NULL) {// happens with dummy objects\n        NM_FREE(storage->elements);\n      }\n      NM_FREE(storage);\n    }\n  }\n}\n\n/*\n * Destructor for dense storage references (slicing).\n */\nvoid nm_dense_storage_delete_ref(STORAGE* s) {\n  // Sometimes Ruby passes in NULL storage for some reason (probably on copy construction failure).\n  if (s) {\n    DENSE_STORAGE* storage = (DENSE_STORAGE*)s;\n    nm_dense_storage_delete( reinterpret_cast<STORAGE*>(storage->src) );\n    NM_FREE(storage->shape);\n    NM_FREE(storage->offset);\n    NM_FREE(storage);\n  }\n}\n\n/*\n * Mark values in a dense matrix for garbage collection. 
This may not be necessary -- further testing required.\n */\nvoid nm_dense_storage_mark(STORAGE* storage_base) {\n\n  DENSE_STORAGE* storage = (DENSE_STORAGE*)storage_base;\n\n  if (storage && storage->dtype == nm::RUBYOBJ) {\n    VALUE* els = reinterpret_cast<VALUE*>(storage->elements);\n\n    if (els) {\n      rb_gc_mark_locations(els, &(els[nm_storage_count_max_elements(storage)-1]));\n    }\n    //for (size_t index = nm_storage_count_max_elements(storage); index-- > 0;) {\n    //  rb_gc_mark(els[index]);\n    //}\n  }\n}\n\n/**\n * Register a dense storage struct as in-use to avoid garbage collection of the\n * elements stored.\n *\n * This function will check dtype and ignore non-object dtype, so its safe to pass any dense storage in.\n *\n */\nvoid nm_dense_storage_register(const STORAGE* s) {\n  const DENSE_STORAGE* storage = reinterpret_cast<const DENSE_STORAGE*>(s);\n  if (storage->dtype == nm::RUBYOBJ && storage->elements) {\n    nm_register_values(reinterpret_cast<VALUE*>(storage->elements), nm_storage_count_max_elements(storage));\n  }\n}\n\n/**\n * Unregister a dense storage struct to allow normal garbage collection of the\n * elements stored.\n *\n * This function will check dtype and ignore non-object dtype, so its safe to pass any dense storage in.\n *\n */\nvoid nm_dense_storage_unregister(const STORAGE* s) {\n  const DENSE_STORAGE* storage = reinterpret_cast<const DENSE_STORAGE*>(s);\n  if (storage->dtype == nm::RUBYOBJ && storage->elements) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(storage->elements), nm_storage_count_max_elements(storage));\n  }\n}\n\n///////////////\n// Accessors //\n///////////////\n\n\n\n/*\n * map_pair iterator for dense matrices (for element-wise operations)\n */\nVALUE nm_dense_map_pair(VALUE self, VALUE right) {\n\n  NM_CONSERVATIVE(nm_register_value(&self));\n  NM_CONSERVATIVE(nm_register_value(&right));\n\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n  
NM_CONSERVATIVE(nm_unregister_value(&self));\n  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_enumerator_length);\n\n  DENSE_STORAGE *s = NM_STORAGE_DENSE(self),\n                *t = NM_STORAGE_DENSE(right);\n\n  size_t* coords = NM_ALLOCA_N(size_t, s->dim);\n  memset(coords, 0, sizeof(size_t) * s->dim);\n\n  size_t *shape_copy = NM_ALLOC_N(size_t, s->dim);\n  memcpy(shape_copy, s->shape, sizeof(size_t) * s->dim);\n\n  size_t count = nm_storage_count_max_elements(s);\n\n  DENSE_STORAGE* result = nm_dense_storage_create(nm::RUBYOBJ, shape_copy, s->dim, NULL, 0);\n\n  VALUE* result_elem = reinterpret_cast<VALUE*>(result->elements);\n  nm_dense_storage_register(result);\n\n  for (size_t k = 0; k < count; ++k) {\n    nm_dense_storage_coords(result, k, coords);\n    size_t s_index = nm_dense_storage_pos(s, coords),\n           t_index = nm_dense_storage_pos(t, coords);\n\n    VALUE sval = NM_DTYPE(self) == nm::RUBYOBJ ? reinterpret_cast<VALUE*>(s->elements)[s_index] : nm::rubyobj_from_cval((char*)(s->elements) + s_index*DTYPE_SIZES[NM_DTYPE(self)], NM_DTYPE(self)).rval;\n    nm_register_value(&sval);\n    VALUE tval = NM_DTYPE(right) == nm::RUBYOBJ ? 
reinterpret_cast<VALUE*>(t->elements)[t_index] : nm::rubyobj_from_cval((char*)(t->elements) + t_index*DTYPE_SIZES[NM_DTYPE(right)], NM_DTYPE(right)).rval;\n    result_elem[k] = rb_yield_values(2, sval, tval);\n    nm_unregister_value(&sval);\n  }\n\n  VALUE klass = CLASS_OF(self);\n  NMATRIX* m = nm_create(nm::DENSE_STORE, reinterpret_cast<STORAGE*>(result));\n  nm_register_nmatrix(m);\n  VALUE to_return = Data_Wrap_Struct(klass, nm_mark, nm_delete, m);\n\n  nm_unregister_nmatrix(m);\n  nm_dense_storage_unregister(result);\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n\n  return to_return;\n\n}\n\n/*\n * map enumerator for dense matrices.\n */\nVALUE nm_dense_map(VALUE self) {\n\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_enumerator_length);\n\n  DENSE_STORAGE *s = NM_STORAGE_DENSE(self);\n\n  size_t* coords = NM_ALLOCA_N(size_t, s->dim);\n  memset(coords, 0, sizeof(size_t) * s->dim);\n\n  size_t *shape_copy = NM_ALLOC_N(size_t, s->dim);\n  memcpy(shape_copy, s->shape, sizeof(size_t) * s->dim);\n\n  size_t count = nm_storage_count_max_elements(s);\n\n  DENSE_STORAGE* result = nm_dense_storage_create(nm::RUBYOBJ, shape_copy, s->dim, NULL, 0);\n\n  VALUE* result_elem = reinterpret_cast<VALUE*>(result->elements);\n\n  nm_dense_storage_register(result);\n\n  for (size_t k = 0; k < count; ++k) {\n    nm_dense_storage_coords(result, k, coords);\n    size_t s_index = nm_dense_storage_pos(s, coords);\n\n    result_elem[k] = rb_yield(NM_DTYPE(self) == nm::RUBYOBJ ? 
reinterpret_cast<VALUE*>(s->elements)[s_index] : nm::rubyobj_from_cval((char*)(s->elements) + s_index*DTYPE_SIZES[NM_DTYPE(self)], NM_DTYPE(self)).rval);\n  }\n\n  VALUE klass = CLASS_OF(self);\n\n  NMATRIX* m = nm_create(nm::DENSE_STORE, reinterpret_cast<STORAGE*>(result));\n  nm_register_nmatrix(m);\n\n  VALUE to_return = Data_Wrap_Struct(klass, nm_mark, nm_delete, m);\n\n  nm_unregister_nmatrix(m);\n  nm_dense_storage_unregister(result);\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n\n  return to_return;\n}\n\n\n/*\n * each_with_indices iterator for dense matrices.\n */\nVALUE nm_dense_each_with_indices(VALUE nmatrix) {\n\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  \n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, nm_enumerator_length); // fourth argument only used by Ruby2+\n  DENSE_STORAGE* s = NM_STORAGE_DENSE(nmatrix);\n\n  // Create indices and initialize them to zero\n  size_t* coords = NM_ALLOCA_N(size_t, s->dim);\n  memset(coords, 0, sizeof(size_t) * s->dim);\n\n  size_t slice_index;\n  size_t* shape_copy = NM_ALLOC_N(size_t, s->dim);\n  memcpy(shape_copy, s->shape, sizeof(size_t) * s->dim);\n\n  DENSE_STORAGE* sliced_dummy = nm_dense_storage_create_dummy(s->dtype, shape_copy, s->dim);\n\n  for (size_t k = 0; k < nm_storage_count_max_elements(s); ++k) {\n    nm_dense_storage_coords(sliced_dummy, k, coords);\n    slice_index = nm_dense_storage_pos(s, coords);\n    VALUE ary = rb_ary_new();\n    nm_register_value(&ary);\n    if (NM_DTYPE(nmatrix) == nm::RUBYOBJ) rb_ary_push(ary, reinterpret_cast<VALUE*>(s->elements)[slice_index]);\n    else rb_ary_push(ary, nm::rubyobj_from_cval((char*)(s->elements) + slice_index*DTYPE_SIZES[NM_DTYPE(nmatrix)], NM_DTYPE(nmatrix)).rval);\n\n    for (size_t p = 0; p < s->dim; ++p) {\n      rb_ary_push(ary, INT2FIX(coords[p]));\n    }\n\n    // yield the array which now consists of the value and the indices\n    rb_yield(ary);\n    
nm_unregister_value(&ary);\n  }\n\n  nm_dense_storage_delete(sliced_dummy);\n\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n\n  return nmatrix;\n\n}\n\n\n/*\n * Borrowed this function from NArray. Handles 'each' iteration on a dense\n * matrix.\n *\n * Additionally, handles separately matrices containing VALUEs and matrices\n * containing other types of data.\n */\nVALUE nm_dense_each(VALUE nmatrix) {\n\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, nm_enumerator_length);\n\n  DENSE_STORAGE* s = NM_STORAGE_DENSE(nmatrix);\n\n  size_t* temp_coords = NM_ALLOCA_N(size_t, s->dim);\n  size_t sliced_index;\n  size_t* shape_copy = NM_ALLOC_N(size_t, s->dim);\n  memcpy(shape_copy, s->shape, sizeof(size_t) * s->dim);\n  DENSE_STORAGE* sliced_dummy = nm_dense_storage_create_dummy(s->dtype, shape_copy, s->dim);\n\n  if (NM_DTYPE(nmatrix) == nm::RUBYOBJ) {\n\n    // matrix of Ruby objects -- yield those objects directly\n    for (size_t i = 0; i < nm_storage_count_max_elements(s); ++i) {\n      nm_dense_storage_coords(sliced_dummy, i, temp_coords);\n      sliced_index = nm_dense_storage_pos(s, temp_coords);\n      rb_yield( reinterpret_cast<VALUE*>(s->elements)[sliced_index] );\n    }\n\n  } else {\n\n    // We're going to copy the matrix element into a Ruby VALUE and then operate on it. 
This way user can't accidentally\n    // modify it and cause a seg fault.\n    for (size_t i = 0; i < nm_storage_count_max_elements(s); ++i) {\n      nm_dense_storage_coords(sliced_dummy, i, temp_coords);\n      sliced_index = nm_dense_storage_pos(s, temp_coords);\n      VALUE v = nm::rubyobj_from_cval((char*)(s->elements) + sliced_index*DTYPE_SIZES[NM_DTYPE(nmatrix)], NM_DTYPE(nmatrix)).rval;\n      rb_yield( v ); // yield to the copy we made\n    }\n  }\n\n  nm_dense_storage_delete(sliced_dummy);\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n\n  return nmatrix;\n\n}\n\n\n/*\n * Non-templated version of nm::dense_storage::slice_copy\n */\nstatic void slice_copy(DENSE_STORAGE *dest, const DENSE_STORAGE *src, size_t* lengths, size_t pdest, size_t psrc, size_t n) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(slice_copy_table, nm::dense_storage::slice_copy, void, DENSE_STORAGE*, const DENSE_STORAGE*, size_t*, size_t, size_t, size_t)\n\n  slice_copy_table[dest->dtype][src->dtype](dest, src, lengths, pdest, psrc, n);\n}\n\n\n/*\n * Get a slice or one element, using copying.\n *\n * FIXME: Template the first condition.\n */\nvoid* nm_dense_storage_get(const STORAGE* storage, SLICE* slice) {\n  DENSE_STORAGE* s = (DENSE_STORAGE*)storage;\n  if (slice->single)\n    return (char*)(s->elements) + nm_dense_storage_pos(s, slice->coords) * DTYPE_SIZES[s->dtype];\n  else {\n    nm_dense_storage_register(s);\n    size_t *shape      = NM_ALLOC_N(size_t, s->dim);\n    for (size_t i = 0; i < s->dim; ++i) {\n      shape[i]  = slice->lengths[i];\n    }\n\n    DENSE_STORAGE* ns = nm_dense_storage_create(s->dtype, shape, s->dim, NULL, 0);\n\n    slice_copy(ns,\n        reinterpret_cast<const DENSE_STORAGE*>(s->src),\n        slice->lengths,\n        0,\n        nm_dense_storage_pos(s, slice->coords),\n        0);\n\n    nm_dense_storage_unregister(s);\n    return ns;\n  }\n}\n\n/*\n * Get a slice or one element by reference (no copy).\n *\n * FIXME: Template the first condition.\n */\nvoid* 
nm_dense_storage_ref(const STORAGE* storage, SLICE* slice) {\n  DENSE_STORAGE* s = (DENSE_STORAGE*)storage;\n\n  if (slice->single)\n    return (char*)(s->elements) + nm_dense_storage_pos(s, slice->coords) * DTYPE_SIZES[s->dtype];\n\n  else {\n    nm_dense_storage_register(s);\n    DENSE_STORAGE* ns = NM_ALLOC( DENSE_STORAGE );\n    ns->dim        = s->dim;\n    ns->dtype      = s->dtype;\n    ns->offset     = NM_ALLOC_N(size_t, ns->dim);\n    ns->shape      = NM_ALLOC_N(size_t, ns->dim);\n\n    for (size_t i = 0; i < ns->dim; ++i) {\n      ns->offset[i] = slice->coords[i] + s->offset[i];\n      ns->shape[i]  = slice->lengths[i];\n    }\n\n    ns->stride     = s->stride;\n    ns->elements   = s->elements;\n\n    s->src->count++;\n    ns->src = s->src;\n\n    nm_dense_storage_unregister(s);\n    return ns;\n  }\n}\n\n\n\n\n/*\n * Set a value or values in a dense matrix. Requires that right be either a single value or an NMatrix (ref or real).\n */\nvoid nm_dense_storage_set(VALUE left, SLICE* slice, VALUE right) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::set, void, VALUE, SLICE*, VALUE)\n  nm::dtype_t dtype = NM_DTYPE(left);\n  ttable[dtype](left, slice, right);\n}\n\n\n///////////\n// Tests //\n///////////\n\n/*\n * Do these two dense matrices have the same contents?\n *\n * TODO: Test the shape of the two matrices.\n * TODO: See if using memcmp is faster when the left- and right-hand matrices\n *        have the same dtype.\n */\nbool nm_dense_storage_eqeq(const STORAGE* left, const STORAGE* right) {\n  LR_DTYPE_TEMPLATE_TABLE(nm::dense_storage::eqeq, bool, const DENSE_STORAGE*, const DENSE_STORAGE*)\n\n  if (!ttable[left->dtype][right->dtype]) {\n    rb_raise(nm_eDataTypeError, \"comparison between these dtypes is undefined\");\n    return false;\n  }\n\n  return ttable[left->dtype][right->dtype]((const DENSE_STORAGE*)left, (const DENSE_STORAGE*)right);\n}\n\n/*\n * Test to see if the matrix is Hermitian.  
If the matrix does not have a\n * dtype of Complex64 or Complex128 this is the same as testing for symmetry.\n */\nbool nm_dense_storage_is_hermitian(const DENSE_STORAGE* mat, int lda) {\n  if (mat->dtype == nm::COMPLEX64) {\n    return nm::dense_storage::is_hermitian<nm::Complex64>(mat, lda);\n\n  } else if (mat->dtype == nm::COMPLEX128) {\n    return nm::dense_storage::is_hermitian<nm::Complex128>(mat, lda);\n\n  } else {\n    return nm_dense_storage_is_symmetric(mat, lda);\n  }\n}\n\n/*\n * Is this dense matrix symmetric about the diagonal?\n */\nbool nm_dense_storage_is_symmetric(const DENSE_STORAGE* mat, int lda) {\n  DTYPE_TEMPLATE_TABLE(nm::dense_storage::is_symmetric, bool, const DENSE_STORAGE*, int);\n\n  return ttable[mat->dtype](mat, lda);\n}\n\n//////////\n// Math //\n//////////\n\n\n/*\n * Dense matrix-matrix multiplication.\n */\nSTORAGE* nm_dense_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {\n  DTYPE_TEMPLATE_TABLE(nm::dense_storage::matrix_multiply, DENSE_STORAGE*, const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);\n\n  return ttable[casted_storage.left->dtype](casted_storage, resulting_shape, vector);\n}\n\n/////////////\n// Utility //\n/////////////\n\n/*\n * Determine the linear array position (in elements of s) of some set of coordinates\n * (given by slice).\n */\nsize_t nm_dense_storage_pos(const DENSE_STORAGE* s, const size_t* coords) {\n  size_t pos = 0;\n\n  for (size_t i = 0; i < s->dim; ++i)\n    pos += (coords[i] + s->offset[i]) * s->stride[i];\n\n  return pos;\n\n}\n\n/*\n * Determine the a set of slice coordinates from linear array position (in elements\n * of s) of some set of coordinates (given by slice).  
(Inverse of\n * nm_dense_storage_pos).\n *\n * The parameter coords_out should be a pre-allocated array of size equal to s->dim.\n */\nvoid nm_dense_storage_coords(const DENSE_STORAGE* s, const size_t slice_pos, size_t* coords_out) {\n\n  size_t temp_pos = slice_pos;\n\n  for (size_t i = 0; i < s->dim; ++i) {\n    coords_out[i] = (temp_pos - temp_pos % s->stride[i])/s->stride[i] - s->offset[i];\n    temp_pos = temp_pos % s->stride[i];\n  }\n}\n\n/*\n * Calculate the stride length.\n */\nstatic size_t* stride(size_t* shape, size_t dim) {\n  size_t i, j;\n  size_t* stride = NM_ALLOC_N(size_t, dim);\n\n  for (i = 0; i < dim; ++i) {\n    stride[i] = 1;\n    for (j = i+1; j < dim; ++j) {\n      stride[i] *= shape[j];\n    }\n  }\n\n  return stride;\n}\n\n\n/////////////////////////\n// Copying and Casting //\n/////////////////////////\n\n/*\n * Copy dense storage, changing dtype if necessary.\n */\nSTORAGE* nm_dense_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void* dummy) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::cast_copy, DENSE_STORAGE*, const DENSE_STORAGE* rhs, nm::dtype_t new_dtype);\n\n  if (!ttable[new_dtype][rhs->dtype]) {\n    rb_raise(nm_eDataTypeError, \"cast between these dtypes is undefined\");\n    return NULL;\n  }\n\n  return (STORAGE*)ttable[new_dtype][rhs->dtype]((DENSE_STORAGE*)rhs, new_dtype);\n}\n\n/*\n * Copy dense storage without a change in dtype.\n */\nDENSE_STORAGE* nm_dense_storage_copy(const DENSE_STORAGE* rhs) {\n  nm_dense_storage_register(rhs);\n\n  size_t  count = 0;\n  size_t *shape  = NM_ALLOC_N(size_t, rhs->dim);\n\n  // copy shape and offset\n  for (size_t i = 0; i < rhs->dim; ++i) {\n    shape[i]  = rhs->shape[i];\n  }\n\n  DENSE_STORAGE* lhs = nm_dense_storage_create(rhs->dtype, shape, rhs->dim, NULL, 0);\n  count = nm_storage_count_max_elements(lhs);\n\n\n  // Ensure that allocation worked before copying.\n  if (lhs && count) {\n    if (rhs == rhs->src) // not a reference\n      
memcpy(lhs->elements, rhs->elements, DTYPE_SIZES[rhs->dtype] * count);\n    else { // slice whole matrix\n      nm_dense_storage_register(lhs);\n      size_t *offset = NM_ALLOC_N(size_t, rhs->dim);\n      memset(offset, 0, sizeof(size_t) * rhs->dim);\n\n      slice_copy(lhs,\n           reinterpret_cast<const DENSE_STORAGE*>(rhs->src),\n           rhs->shape,\n           0,\n           nm_dense_storage_pos(rhs, offset),\n           0);\n\n      nm_dense_storage_unregister(lhs);\n    }\n  }\n\n  nm_dense_storage_unregister(rhs);\n\n  return lhs;\n}\n\n\n/*\n * Transpose dense storage into a new dense storage object. Basically a copy constructor.\n *\n * Not much point in templating this as it's pretty straight-forward.\n */\nSTORAGE* nm_dense_storage_copy_transposed(const STORAGE* rhs_base) {\n  DENSE_STORAGE* rhs = (DENSE_STORAGE*)rhs_base;\n\n  nm_dense_storage_register(rhs);\n  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);\n\n  // swap shape\n  shape[0] = rhs->shape[1];\n  shape[1] = rhs->shape[0];\n\n  DENSE_STORAGE *lhs = nm_dense_storage_create(rhs->dtype, shape, rhs->dim, NULL, 0);\n\n  nm_dense_storage_register(lhs);\n\n  if (rhs_base->src == rhs_base) {\n    nm_math_transpose_generic(rhs->shape[0], rhs->shape[1], rhs->elements, rhs->shape[1], lhs->elements, lhs->shape[1], DTYPE_SIZES[rhs->dtype]);\n  } else {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::ref_slice_copy_transposed, void, const DENSE_STORAGE* rhs, DENSE_STORAGE* lhs);\n\n    if (!ttable[lhs->dtype][rhs->dtype]) {\n      nm_dense_storage_unregister(rhs);\n      nm_dense_storage_unregister(lhs);      \n      rb_raise(nm_eDataTypeError, \"transposition between these dtypes is undefined\");\n    }\n\n    ttable[lhs->dtype][rhs->dtype](rhs, lhs);\n  }\n\n  nm_dense_storage_unregister(rhs);\n  nm_dense_storage_unregister(lhs);\n\n  return (STORAGE*)lhs;\n}\n\n} // end of extern \"C\" block\n\nnamespace nm {\n\n/*\n * Used for slice setting. 
Takes the right-hand of the equal sign, a single VALUE, and massages\n * it into the correct form if it's not already there (dtype, non-ref, dense). Returns a pair of the NMATRIX* and a\n * boolean. If the boolean is true, the calling function is responsible for calling nm_delete on the NMATRIX*.\n * Otherwise, the NMATRIX* still belongs to Ruby and Ruby will free it.\n */\nstd::pair<NMATRIX*,bool> interpret_arg_as_dense_nmatrix(VALUE right, nm::dtype_t dtype) {\n  NM_CONSERVATIVE(nm_register_value(&right));\n  if (IsNMatrixType(right)) {\n    NMATRIX *r;\n    if (NM_STYPE(right) != DENSE_STORE || NM_DTYPE(right) != dtype || NM_SRC(right) != NM_STORAGE(right)) {\n      UnwrapNMatrix( right, r );\n      NMATRIX* ldtype_r = nm_cast_with_ctype_args(r, nm::DENSE_STORE, dtype, NULL);\n      NM_CONSERVATIVE(nm_unregister_value(&right));\n      return std::make_pair(ldtype_r,true);\n    } else {  // simple case -- right-hand matrix is dense and is not a reference and has same dtype\n      UnwrapNMatrix( right, r );\n      NM_CONSERVATIVE(nm_unregister_value(&right));\n      return std::make_pair(r, false);\n    }\n    // Do not set v_alloc = true for either of these. 
It is the responsibility of r/ldtype_r\n  } else if (RB_TYPE_P(right, T_DATA)) {\n    NM_CONSERVATIVE(nm_unregister_value(&right));\n    rb_raise(rb_eTypeError, \"unrecognized type for slice assignment\");\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n  return std::make_pair<NMATRIX*,bool>(NULL, false);\n}\n\n\nnamespace dense_storage {\n\n/////////////////////////\n// Templated Functions //\n/////////////////////////\n\ntemplate<typename LDType, typename RDType>\nvoid ref_slice_copy_transposed(const DENSE_STORAGE* rhs, DENSE_STORAGE* lhs) {\n\n  nm_dense_storage_register(rhs);\n  nm_dense_storage_register(lhs);\n\n  LDType* lhs_els = reinterpret_cast<LDType*>(lhs->elements);\n  RDType* rhs_els = reinterpret_cast<RDType*>(rhs->elements);\n  size_t count = nm_storage_count_max_elements(lhs);;\n  size_t* temp_coords = NM_ALLOCA_N(size_t, lhs->dim);\n  size_t coord_swap_temp;\n\n  while (count-- > 0) {\n    nm_dense_storage_coords(lhs, count, temp_coords);\n    NM_SWAP(temp_coords[0], temp_coords[1], coord_swap_temp);\n    size_t r_coord = nm_dense_storage_pos(rhs, temp_coords);\n    lhs_els[count] = rhs_els[r_coord];\n  }\n\n  nm_dense_storage_unregister(rhs);\n  nm_dense_storage_unregister(lhs);\n\n}\n\ntemplate <typename LDType, typename RDType>\nDENSE_STORAGE* cast_copy(const DENSE_STORAGE* rhs, dtype_t new_dtype) {\n  nm_dense_storage_register(rhs);\n\n  size_t  count = nm_storage_count_max_elements(rhs);\n\n  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);\n  memcpy(shape, rhs->shape, sizeof(size_t) * rhs->dim);\n\n  DENSE_STORAGE* lhs = nm_dense_storage_create(new_dtype, shape, rhs->dim, NULL, 0);\n\n  nm_dense_storage_register(lhs);\n\n  // Ensure that allocation worked before copying.\n  if (lhs && count) {\n    if (rhs->src != rhs) { // Make a copy of a ref to a matrix.\n      size_t* offset      = NM_ALLOCA_N(size_t, rhs->dim);\n      memset(offset, 0, sizeof(size_t) * rhs->dim);\n\n      slice_copy(lhs, reinterpret_cast<const 
DENSE_STORAGE*>(rhs->src),\n                 rhs->shape, 0,\n                 nm_dense_storage_pos(rhs, offset), 0);\n\n    } else {              // Make a regular copy.\n      RDType* rhs_els          = reinterpret_cast<RDType*>(rhs->elements);\n      LDType* lhs_els          = reinterpret_cast<LDType*>(lhs->elements);\n\n      for (size_t i = 0; i < count; ++i)\n        lhs_els[i] = rhs_els[i];\n    }\n  }\n\n  nm_dense_storage_unregister(rhs);\n  nm_dense_storage_unregister(lhs);\n\n  return lhs;\n}\n\ntemplate <typename LDType, typename RDType>\nbool eqeq(const DENSE_STORAGE* left, const DENSE_STORAGE* right) {\n  nm_dense_storage_register(left);\n  nm_dense_storage_register(right);\n\n  size_t index;\n  DENSE_STORAGE *tmp1, *tmp2;\n  tmp1 = NULL; tmp2 = NULL;\n  bool result = true;\n  /* FIXME: Very strange behavior! The GC calls the method directly with non-initialized data. */\n\n  LDType* left_elements    = (LDType*)left->elements;\n  RDType* right_elements  = (RDType*)right->elements;\n\n  // Copy elements in temp matrix if you have reference to the right.\n  if (left->src != left) {\n    tmp1 = nm_dense_storage_copy(left);\n    nm_dense_storage_register(tmp1);\n    left_elements = (LDType*)tmp1->elements;\n  }\n  if (right->src != right) {\n    tmp2 = nm_dense_storage_copy(right);\n    nm_dense_storage_register(tmp2);\n    right_elements = (RDType*)tmp2->elements;\n  }\n\n\n\n  for (index = nm_storage_count_max_elements(left); index-- > 0;) {\n    if (left_elements[index] != right_elements[index]) {\n      result = false;\n      break;\n    }\n  }\n\n  if (tmp1) {\n    nm_dense_storage_unregister(tmp1);\n    NM_FREE(tmp1);\n  }\n  if (tmp2) {\n    nm_dense_storage_unregister(tmp2);\n    NM_FREE(tmp2);\n  }\n\n  nm_dense_storage_unregister(left);\n  nm_dense_storage_unregister(right);\n  return result;\n}\n\ntemplate <typename DType>\nbool is_hermitian(const DENSE_STORAGE* mat, int lda) {\n  unsigned int i, j;\n  DType complex_conj;\n\n  const DType* els = 
(DType*) mat->elements;\n\n  for (i = mat->shape[0]; i-- > 0;) {\n    for (j = i + 1; j < mat->shape[1]; ++j) {\n      complex_conj    = els[j*lda + i];\n      complex_conj.i  = -complex_conj.i;\n\n      if (els[i*lda+j] != complex_conj) {\n        return false;\n      }\n    }\n  }\n\n  return true;\n}\n\ntemplate <typename DType>\nbool is_symmetric(const DENSE_STORAGE* mat, int lda) {\n  unsigned int i, j;\n  const DType* els = (DType*) mat->elements;\n\n  for (i = mat->shape[0]; i-- > 0;) {\n    for (j = i + 1; j < mat->shape[1]; ++j) {\n      if (els[i*lda+j] != els[j*lda+i]) {\n        return false;\n      }\n    }\n  }\n\n  return true;\n}\n\n\n\n/*\n * DType-templated matrix-matrix multiplication for dense storage.\n */\ntemplate <typename DType>\nstatic DENSE_STORAGE* matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {\n  DENSE_STORAGE *left  = (DENSE_STORAGE*)(casted_storage.left),\n                *right = (DENSE_STORAGE*)(casted_storage.right);\n\n  nm_dense_storage_register(left);\n  nm_dense_storage_register(right);\n\n  // Create result storage.\n  DENSE_STORAGE* result = nm_dense_storage_create(left->dtype, resulting_shape, 2, NULL, 0);\n\n  nm_dense_storage_register(result);\n\n  DType *pAlpha = NM_ALLOCA_N(DType, 1),\n        *pBeta  = NM_ALLOCA_N(DType, 1);\n\n  *pAlpha = 1;\n  *pBeta = 0;\n  // Do the multiplication\n  if (vector) nm::math::gemv<DType>(CblasNoTrans, left->shape[0], left->shape[1], pAlpha,\n                                    reinterpret_cast<DType*>(left->elements), left->shape[1],\n                                    reinterpret_cast<DType*>(right->elements), 1, pBeta,\n                                    reinterpret_cast<DType*>(result->elements), 1);\n  else        nm::math::gemm<DType>(CblasRowMajor, CblasNoTrans, CblasNoTrans, left->shape[0], right->shape[1], left->shape[1],\n                                    pAlpha, reinterpret_cast<DType*>(left->elements), left->shape[1],\n         
                           reinterpret_cast<DType*>(right->elements), right->shape[1], pBeta,\n                                    reinterpret_cast<DType*>(result->elements), result->shape[1]);\n\n\n  nm_dense_storage_unregister(left);\n  nm_dense_storage_unregister(right);\n  nm_dense_storage_unregister(result);\n\n  return result;\n}\n\n}} // end of namespace nm::dense_storage\n"
  },
  {
    "path": "ext/nmatrix/storage/dense/dense.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == dense.h\n//\n// Dense n-dimensional matrix storage.\n\n#ifndef DENSE_H\n#define DENSE_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <cstdlib>\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n//#include \"util/math.h\"\n\n#include \"data/data.h\"\n\n#include \"../common.h\"\n\n#include \"nmatrix.h\"\n\n/*\n * Macros\n */\n\n/*\n * Types\n */\n\n/*\n * Data\n */\n\nextern \"C\" {\n\n/*\n * Functions\n */\n\n///////////////\n// Lifecycle //\n///////////////\n\nDENSE_STORAGE*  nm_dense_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* elements, size_t elements_length);\nvoid            nm_dense_storage_delete(STORAGE* s);\nvoid            nm_dense_storage_delete_ref(STORAGE* s);\nvoid            nm_dense_storage_mark(STORAGE*);\nvoid            nm_dense_storage_register(const STORAGE* s);\nvoid            nm_dense_storage_unregister(const STORAGE* s);\n\n\n///////////////\n// Accessors //\n///////////////\n\n\nVALUE nm_dense_map_pair(VALUE self, VALUE right);\nVALUE nm_dense_map(VALUE self);\nVALUE nm_dense_each(VALUE nmatrix);\nVALUE nm_dense_each_with_indices(VALUE nmatrix);\nvoid*  nm_dense_storage_get(const STORAGE* s, SLICE* slice);\nvoid*  
nm_dense_storage_ref(const STORAGE* s, SLICE* slice);\nvoid  nm_dense_storage_set(VALUE left, SLICE* slice, VALUE right);\n\n///////////\n// Tests //\n///////////\n\nbool nm_dense_storage_eqeq(const STORAGE* left, const STORAGE* right);\nbool nm_dense_storage_is_symmetric(const DENSE_STORAGE* mat, int lda);\nbool nm_dense_storage_is_hermitian(const DENSE_STORAGE* mat, int lda);\n\n//////////\n// Math //\n//////////\n\nSTORAGE* nm_dense_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);\n\n/////////////\n// Utility //\n/////////////\n\nsize_t nm_dense_storage_pos(const DENSE_STORAGE* s, const size_t* coords);\nvoid nm_dense_storage_coords(const DENSE_STORAGE* s, const size_t slice_pos, size_t* coords_out);\n\n/////////////////////////\n// Copying and Casting //\n/////////////////////////\n\nDENSE_STORAGE*  nm_dense_storage_copy(const DENSE_STORAGE* rhs);\nSTORAGE*        nm_dense_storage_copy_transposed(const STORAGE* rhs_base);\nSTORAGE*        nm_dense_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void*);\n\n} // end of extern \"C\" block\n\nnamespace nm {\n  std::pair<NMATRIX*,bool> interpret_arg_as_dense_nmatrix(VALUE right, nm::dtype_t dtype);\n} // end of namespace nm\n\n#endif // DENSE_H\n"
  },
  {
    "path": "ext/nmatrix/storage/list/list.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == list.c\n//\n// List-of-lists n-dimensional matrix storage. Uses singly-linked\n// lists.\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <algorithm> // std::min\n#include <iostream>\n#include <vector>\n#include <list>\n\n/*\n * Project Includes\n */\n\n#include \"../../types.h\"\n\n#include \"../../data/data.h\"\n\n#include \"../dense/dense.h\"\n#include \"../common.h\"\n#include \"list.h\"\n\n#include \"../../math/math.h\"\n#include \"../../util/sl_list.h\"\n\n/*\n * Macros\n */\n\n/*\n * Global Variables\n */\n\n\nextern \"C\" {\nstatic void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n);\nstatic void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals);\nstatic void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions);\n}\n\nnamespace nm { namespace list_storage {\n\n/*\n * Forward Declarations\n */\n\nclass RecurseData {\npublic:\n  // Note that providing init_obj argument does not override init.\n  RecurseData(const LIST_STORAGE* s, VALUE init_obj__ = Qnil) : ref(s), actual(s), shape_(s->shape), offsets(s->dim, 0), init_(s->default_val), init_obj_(init_obj__) {\n   
 while (actual->src != actual) {\n      for (size_t i = 0; i < s->dim; ++i) // update offsets as we recurse\n        offsets[i] += actual->offset[i];\n      actual = reinterpret_cast<LIST_STORAGE*>(actual->src);\n    }\n    nm_list_storage_register(actual);\n    nm_list_storage_register(ref);\n    actual_shape_ = actual->shape;\n\n    if (init_obj_ == Qnil) {\n      init_obj_ = s->dtype == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s->default_val) : nm::rubyobj_from_cval(s->default_val, s->dtype).rval;\n    }\n    nm_register_value(&init_obj_);\n  }\n\n  ~RecurseData() {\n    nm_unregister_value(&init_obj_);\n    nm_list_storage_unregister(ref);\n    nm_list_storage_unregister(actual);\n  }\n\n  dtype_t dtype() const { return ref->dtype; }\n\n\n  size_t dim() const { return ref->dim; }\n\n  size_t ref_shape(size_t rec) const {\n    return shape_[ref->dim - rec - 1];\n  }\n\n  size_t* copy_alloc_shape() const {\n    size_t* new_shape = NM_ALLOC_N(size_t, ref->dim);\n    memcpy(new_shape, shape_, sizeof(size_t)*ref->dim);\n    return new_shape;\n  }\n\n  size_t actual_shape(size_t rec) const {\n    return actual_shape_[actual->dim - rec - 1];\n  }\n\n  size_t offset(size_t rec) const {\n    return offsets[ref->dim - rec - 1];\n  }\n\n  void* init() const {\n    return init_;\n  }\n\n  VALUE init_obj() const { return init_obj_; }\n\n  LIST* top_level_list() const {\n    return reinterpret_cast<LIST*>(actual->rows);\n  }\n\n  const LIST_STORAGE* ref;\n  const LIST_STORAGE* actual;\n\n  size_t* shape_; // of ref\n  size_t* actual_shape_;\nprotected:\n  std::vector<size_t> offsets; // relative to actual\n  void* init_;\n  VALUE init_obj_;\n\n};\n\n\ntemplate <typename LDType, typename RDType>\nstatic LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, nm::dtype_t new_dtype);\n\ntemplate <typename LDType, typename RDType>\nstatic bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const LIST* r, size_t rec);\n\ntemplate <typename SDType, typename TDType>\nstatic 
bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init);\n\n/*\n * Recursive helper for map_merged_stored_r which handles the case where one list is empty and the other is not.\n */\nstatic void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, const LIST* l, size_t rec, bool rev, const VALUE& t_init) {\n  if (s.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(l, rec);\n  }\n  if (result.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(x, rec);\n  }\n\n  NODE *curr  = l->first,\n       *xcurr = NULL;\n\n  // For reference matrices, make sure we start in the correct place.\n  size_t offset   = s.offset(rec);\n  size_t x_shape  = s.ref_shape(rec);\n\n  while (curr && curr->key < offset) {  curr = curr->next;  }\n  if (curr && curr->key - offset >= x_shape) curr = NULL;\n\n  if (rec) {\n    std::list<LIST*> temp_vals;\n    while (curr) {\n      LIST* val = nm::list::create();\n      map_empty_stored_r(result, s, val, reinterpret_cast<const LIST*>(curr->val), rec-1, rev, t_init);\n\n      if (!val->first) nm::list::del(val, 0);\n      else {\n        nm_list_storage_register_list(val, rec-1);\n  temp_vals.push_front(val);\n        nm::list::insert_helper(x, xcurr, curr->key - offset, val);\n      }\n      curr = curr->next;\n      if (curr && curr->key - offset >= x_shape) curr = NULL;\n    }\n    __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);\n  } else {\n    std::list<VALUE*> temp_vals;\n    while (curr) {\n      VALUE val, s_val;\n      if (s.dtype() == nm::RUBYOBJ) s_val = (*reinterpret_cast<nm::RubyObject*>(curr->val)).rval;\n      else                          s_val = nm::rubyobj_from_cval(curr->val, s.dtype()).rval;\n\n      if (rev) val = rb_yield_values(2, t_init, s_val);\n      else     val = rb_yield_values(2, s_val, t_init);\n\n      nm_register_value(&val);\n\n      if (rb_funcall(val, rb_intern(\"!=\"), 1, result.init_obj()) == Qtrue) {\n        xcurr = 
nm::list::insert_helper(x, xcurr, curr->key - offset, val);\n        temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));\n        nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));\n      }\n      nm_unregister_value(&val);\n\n      curr = curr->next;\n      if (curr && curr->key - offset >= x_shape) curr = NULL;\n    }\n    __nm_list_storage_unregister_temp_value_list(temp_vals);\n  }\n\n  if (s.dtype() == nm::RUBYOBJ){\n    nm_list_storage_unregister_list(l, rec);\n  }\n  if (result.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_unregister_list(x, rec);\n  }\n\n}\n\n\n/*\n * Recursive helper function for nm_list_map_stored\n */\nstatic void map_stored_r(RecurseData& result, RecurseData& left, LIST* x, const LIST* l, size_t rec) {\n  if (left.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(l, rec);\n  }\n  if (result.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(x, rec);\n  }\n  NODE *lcurr = l->first,\n       *xcurr = x->first;\n\n  // For reference matrices, make sure we start in the correct place.\n  while (lcurr && lcurr->key < left.offset(rec))  {  lcurr = lcurr->next;  }\n\n  if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec))  lcurr = NULL;\n\n  if (rec) {\n    std::list<LIST*> temp_vals;\n    while (lcurr) {\n      size_t key;\n      LIST*  val = nm::list::create();\n      map_stored_r(result, left, val, reinterpret_cast<const LIST*>(lcurr->val), rec-1);\n      key        = lcurr->key - left.offset(rec);\n      lcurr      = lcurr->next;\n\n      if (!val->first) nm::list::del(val, 0); // empty list -- don't insert\n      else {\n        nm_list_storage_register_list(val, rec-1);\n        temp_vals.push_front(val);\n        xcurr = nm::list::insert_helper(x, xcurr, key, val);\n      }\n      if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;\n    }\n    __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);\n  } else {\n    std::list<VALUE*> temp_vals;\n    
while (lcurr) {\n      size_t key;\n      VALUE  val;\n\n      val   = rb_yield_values(1, left.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(lcurr->val) : nm::rubyobj_from_cval(lcurr->val, left.dtype()).rval);\n      key   = lcurr->key - left.offset(rec);\n      lcurr = lcurr->next;\n\n      if (!rb_equal(val, result.init_obj())) {\n        xcurr = nm::list::insert_helper(x, xcurr, key, val);\n        temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));\n        nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));\n      }\n\n      if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;\n    }\n    __nm_list_storage_unregister_temp_value_list(temp_vals);\n  }\n\n  if (left.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_unregister_list(l, rec);\n  }\n  if (result.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_unregister_list(x, rec);\n  }\n}\n\n\n\n/*\n * Recursive helper function for nm_list_map_merged_stored\n */\nstatic void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseData& right, LIST* x, const LIST* l, const LIST* r, size_t rec) {\n  if (left.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(l, rec);\n  }\n  if (right.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(r, rec);\n  }\n  if (result.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_register_list(x, rec);\n  }\n\n\n  NODE *lcurr = l->first,\n       *rcurr = r->first,\n       *xcurr = x->first;\n\n  // For reference matrices, make sure we start in the correct place.\n  while (lcurr && lcurr->key < left.offset(rec))  {  lcurr = lcurr->next;  }\n  while (rcurr && rcurr->key < right.offset(rec)) {  rcurr = rcurr->next;  }\n\n  if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;\n  if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec))  lcurr = NULL;\n\n  if (rec) {\n    std::list<LIST*> temp_vals;\n    while (lcurr || rcurr) {\n      size_t key;\n      LIST*  val = 
nm::list::create();\n\n      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {\n        map_empty_stored_r(result, left, val, reinterpret_cast<const LIST*>(lcurr->val), rec-1, false, right.init_obj());\n        key   = lcurr->key - left.offset(rec);\n        lcurr = lcurr->next;\n      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {\n        map_empty_stored_r(result, right, val, reinterpret_cast<const LIST*>(rcurr->val), rec-1, true, left.init_obj());\n        key   = rcurr->key - right.offset(rec);\n        rcurr = rcurr->next;\n      } else { // == and both present\n        map_merged_stored_r(result, left, right, val, reinterpret_cast<const LIST*>(lcurr->val), reinterpret_cast<const LIST*>(rcurr->val), rec-1);\n        key   = lcurr->key - left.offset(rec);\n        lcurr = lcurr->next;\n        rcurr = rcurr->next;\n      }\n\n\n      if (!val->first) nm::list::del(val, 0); // empty list -- don't insert\n      else {\n        nm_list_storage_register_list(val, rec-1);\n        temp_vals.push_front(val);\n        xcurr = nm::list::insert_helper(x, xcurr, key, val);\n      }\n      if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;\n      if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;\n    }\n    __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);\n  } else {\n    std::list<VALUE*> temp_vals;\n    while (lcurr || rcurr) {\n      size_t key;\n      VALUE  val;\n\n      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {\n        val   = rb_yield_values(2, nm::rubyobj_from_cval(lcurr->val, left.dtype()).rval, right.init_obj());\n        key   = lcurr->key - left.offset(rec);\n        lcurr = lcurr->next;\n      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {\n        val   = rb_yield_values(2, 
left.init_obj(), nm::rubyobj_from_cval(rcurr->val, right.dtype()).rval);\n        key   = rcurr->key - right.offset(rec);\n        rcurr = rcurr->next;\n      } else { // == and both present\n        val   = rb_yield_values(2, nm::rubyobj_from_cval(lcurr->val, left.dtype()).rval, nm::rubyobj_from_cval(rcurr->val, right.dtype()).rval);\n        key   = lcurr->key - left.offset(rec);\n        lcurr = lcurr->next;\n        rcurr = rcurr->next;\n      }\n\n      nm_register_value(&val);\n\n      if (rb_funcall(val, rb_intern(\"!=\"), 1, result.init_obj()) == Qtrue) {\n        xcurr = nm::list::insert_helper(x, xcurr, key, val);\n        temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));\n        nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));\n      }\n\n      nm_unregister_value(&val);\n\n      if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;\n      if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;\n    }\n    __nm_list_storage_unregister_temp_value_list(temp_vals);\n  }\n\n  if (left.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_unregister_list(l, rec);\n  }\n  if (right.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_unregister_list(r, rec);\n  }\n  if (result.dtype() == nm::RUBYOBJ) {\n    nm_list_storage_unregister_list(x, rec);\n  }\n}\n\n\n/*\n * Recursive function, sets multiple values in a matrix from multiple source values. 
Also handles removal; returns true\n * if the recursion results in an empty list at that level (which signals that the current parent should be removed).\n */\ntemplate <typename D>\nstatic bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengths, size_t n, D* v, size_t v_size, size_t& v_offset) {\n  using nm::list::node_is_within_slice;\n  using nm::list::remove_by_node;\n  using nm::list::find_preceding_from_list;\n  using nm::list::insert_first_list;\n  using nm::list::insert_first_node;\n  using nm::list::insert_after;\n  size_t* offsets = dest->offset;\n\n  nm_list_storage_register(dest);\n  if (dest->dtype == nm::RUBYOBJ) {\n    nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n    nm_list_storage_register_list(l, dest->dim - n - 1);\n  }\n\n  // drill down into the structure\n  NODE* prev = find_preceding_from_list(l, coords[n] + offsets[n]);\n  NODE* node = NULL;\n  if (prev) node = prev->next && node_is_within_slice(prev->next, coords[n] + offsets[n], lengths[n]) ? prev->next : NULL;\n  else      node = node_is_within_slice(l->first, coords[n] + offsets[n], lengths[n]) ? l->first : NULL;\n\n  if (dest->dim - n > 1) {\n    size_t i    = 0;\n    size_t key  = i + offsets[n] + coords[n];\n\n    // Make sure we have an element to work with\n    if (!node) {\n      if (!prev) {\n        node = insert_first_list(l, key, nm::list::create());\n      } else {\n        node = insert_after(prev, key, nm::list::create());\n      }\n    }\n\n    // At this point, it's guaranteed that there is a list here matching key.\n    std::list<LIST*> temp_lists;\n    while (node) {\n      // Recurse down into the list. 
If it returns true, it's empty, so we need to delete it.\n      bool remove_parent = slice_set(dest, reinterpret_cast<LIST*>(node->val), coords, lengths, n+1, v, v_size, v_offset);\n      if (dest->dtype == nm::RUBYOBJ) {\n        temp_lists.push_front(reinterpret_cast<LIST*>(node->val));\n        nm_list_storage_register_list(reinterpret_cast<LIST*>(node->val), dest->dim - n - 2);\n      }\n      if (remove_parent) {\n        NM_FREE(remove_by_node(l, prev, node));\n        if (prev) node = prev->next ? prev->next : NULL;\n        else      node = l->first   ? l->first   : NULL;\n      } else {  // move forward\n        prev = node;\n        node = node_is_within_slice(prev->next, key-i, lengths[n]) ? prev->next : NULL;\n      }\n\n      ++i; ++key;\n\n      if (i >= lengths[n]) break;\n\n      // Now do we need to insert another node here? Or is there already one?\n      if (!node) {\n        if (!prev) {\n          node = insert_first_list(l, key, nm::list::create());\n        } else {\n          node = insert_after(prev, key, nm::list::create());\n        }\n      }\n    }\n    __nm_list_storage_unregister_temp_list_list(temp_lists, dest->dim - n - 2);\n\n  } else {\n\n    size_t i    = 0;\n    size_t key  = i + offsets[n] + coords[n];\n    std::list<VALUE*> temp_vals;\n    while (i < lengths[n]) {\n      // Make sure we have an element to work with\n      if (v_offset >= v_size) v_offset %= v_size;\n\n      if (node) {\n        if (node->key == key) {\n          if (v[v_offset] == *reinterpret_cast<D*>(dest->default_val)) { // remove zero value\n\n            NM_FREE(remove_by_node(l, (prev ? prev : l->first), node));\n\n            if (prev) node = prev->next ? prev->next : NULL;\n            else      node = l->first   ? l->first   : NULL;\n\n          } else { // edit directly\n            *reinterpret_cast<D*>(node->val) = v[v_offset];\n            prev = node;\n            node = node->next ? 
node->next : NULL;\n          }\n        } else if (node->key > key) {\n          D* nv = NM_ALLOC(D); *nv = v[v_offset++];\n          if (dest->dtype == nm::RUBYOBJ) {\n            nm_register_value(&*reinterpret_cast<VALUE*>(nv));\n            temp_vals.push_front(reinterpret_cast<VALUE*>(nv));\n          }\n\n          if (prev) node = insert_after(prev, key, nv);\n          else      node = insert_first_node(l, key, nv, sizeof(D));\n\n          prev = node;\n          node = prev->next ? prev->next : NULL;\n        }\n      } else { // no node -- insert a new one\n        D* nv = NM_ALLOC(D); *nv = v[v_offset++];\n        if (dest->dtype == nm::RUBYOBJ) {\n          nm_register_value(&*reinterpret_cast<VALUE*>(nv));\n          temp_vals.push_front(reinterpret_cast<VALUE*>(nv));\n        }\n        if (prev) node = insert_after(prev, key, nv);\n        else      node = insert_first_node(l, key, nv, sizeof(D));\n\n        prev = node;\n        node = prev->next ? prev->next : NULL;\n      }\n\n      ++i; ++key;\n    }\n    __nm_list_storage_unregister_temp_value_list(temp_vals);\n  }\n\n  if (dest->dtype == nm::RUBYOBJ) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);\n    nm_list_storage_unregister_list(l, dest->dim - n - 1);\n  }\n  nm_list_storage_unregister(dest);\n\n  return (l->first) ? 
false : true;\n}\n\n\ntemplate <typename D>\nvoid set(VALUE left, SLICE* slice, VALUE right) {\n  NM_CONSERVATIVE(nm_register_value(&left));\n  NM_CONSERVATIVE(nm_register_value(&right));\n  LIST_STORAGE* s = NM_STORAGE_LIST(left);\n\n  std::pair<NMATRIX*,bool> nm_and_free =\n    interpret_arg_as_dense_nmatrix(right, NM_DTYPE(left));\n\n  // Map the data onto D* v.\n  D*     v;\n  size_t v_size = 1;\n\n  if (nm_and_free.first) {\n    DENSE_STORAGE* t = reinterpret_cast<DENSE_STORAGE*>(nm_and_free.first->storage);\n    v                = reinterpret_cast<D*>(t->elements);\n    v_size           = nm_storage_count_max_elements(t);\n\n  } else if (RB_TYPE_P(right, T_ARRAY)) {\n    nm_register_nmatrix(nm_and_free.first);\n    v_size = RARRAY_LEN(right);\n    v      = NM_ALLOC_N(D, v_size);\n    if (NM_DTYPE(left) == nm::RUBYOBJ)\n        nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n\n    for (size_t m = 0; m < v_size; ++m) {\n      rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));\n    }\n    if (NM_DTYPE(left) == nm::RUBYOBJ)\n        nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);\n\n  } else {\n    nm_register_nmatrix(nm_and_free.first);\n    v = reinterpret_cast<D*>(rubyobj_to_cval(right, NM_DTYPE(left)));\n  }\n\n  if (v_size == 1 && *v == *reinterpret_cast<D*>(s->default_val)) {\n    if (*reinterpret_cast<D*>(nm_list_storage_get(s, slice)) != *reinterpret_cast<D*>(s->default_val)) {\n      nm::list::remove_recursive(s->rows, slice->coords, s->offset, slice->lengths, 0, s->dim);\n    }\n  } else if (slice->single) {\n    slice_set_single(s, s->rows, reinterpret_cast<void*>(v), slice->coords, slice->lengths, 0);\n  } else {\n    size_t v_offset = 0;\n    slice_set<D>(s, s->rows, slice->coords, slice->lengths, 0, v, v_size, v_offset);\n  }\n\n\n  // Only free v if it was allocated in this function.\n  if (nm_and_free.first) {\n    if (nm_and_free.second) {\n      nm_delete(nm_and_free.first);\n    }\n  } else {\n    NM_FREE(v);\n    
nm_unregister_nmatrix(nm_and_free.first);\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&left));\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n}\n\n/*\n * Used only to set a default initial value.\n */\ntemplate <typename D>\nvoid init_default(LIST_STORAGE* s) {\n  s->default_val = NM_ALLOC(D);\n  *reinterpret_cast<D*>(s->default_val) = 0;\n}\n\n\n}} // end of namespace list_storage\n\nextern \"C\" {\n\n/*\n * Functions\n */\n\n\n////////////////\n// Lifecycle //\n///////////////\n\n\n/*\n * Creates a list-of-lists(-of-lists-of-lists-etc) storage framework for a\n * matrix.\n *\n * Note: The pointers you pass in for shape and init_val become property of our\n * new storage. You don't need to free them, and you shouldn't re-use them.\n */\nLIST_STORAGE* nm_list_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* init_val) {\n  LIST_STORAGE* s = NM_ALLOC( LIST_STORAGE );\n\n  s->dim   = dim;\n  s->shape = shape;\n  s->dtype = dtype;\n\n  s->offset = NM_ALLOC_N(size_t, s->dim);\n  memset(s->offset, 0, s->dim * sizeof(size_t));\n\n  s->rows  = nm::list::create();\n  if (init_val)\n    s->default_val = init_val;\n  else {\n    DTYPE_TEMPLATE_TABLE(nm::list_storage::init_default, void, LIST_STORAGE*)\n    ttable[dtype](s);\n  }\n  s->count = 1;\n  s->src = s;\n\n  return s;\n}\n\n/*\n * Destructor for list storage.\n */\nvoid nm_list_storage_delete(STORAGE* s) {\n  if (s) {\n    LIST_STORAGE* storage = (LIST_STORAGE*)s;\n    if (storage->count-- == 1) {\n      nm::list::del( storage->rows, storage->dim - 1 );\n\n      NM_FREE(storage->shape);\n      NM_FREE(storage->offset);\n      NM_FREE(storage->default_val);\n      NM_FREE(s);\n    }\n  }\n}\n\n/*\n * Destructor for a list storage reference slice.\n */\nvoid nm_list_storage_delete_ref(STORAGE* s) {\n  if (s) {\n    LIST_STORAGE* storage = (LIST_STORAGE*)s;\n\n    nm_list_storage_delete( reinterpret_cast<STORAGE*>(storage->src ) );\n    NM_FREE(storage->shape);\n    NM_FREE(storage->offset);\n    
NM_FREE(s);\n  }\n}\n\n/*\n * GC mark function for list storage.\n */\nvoid nm_list_storage_mark(STORAGE* storage_base) {\n  LIST_STORAGE* storage = (LIST_STORAGE*)storage_base;\n\n  if (storage && storage->dtype == nm::RUBYOBJ) {\n    rb_gc_mark(*((VALUE*)(storage->default_val)));\n    nm::list::mark(storage->rows, storage->dim - 1);\n  }\n}\n\nstatic void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals) {\n  for (std::list<VALUE*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {\n    nm_unregister_value(&**it);\n  }\n}\n\nstatic void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions) {\n  for (std::list<LIST*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {\n    nm_list_storage_unregister_list(*it, recursions);\n  }\n}\n\nvoid nm_list_storage_register_node(const NODE* curr) {\n  nm_register_value(&*reinterpret_cast<VALUE*>(curr->val));\n}\n\nvoid nm_list_storage_unregister_node(const NODE* curr) {\n  nm_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));\n}\n\n/**\n * Gets rid of all instances of a given node in the registration list.\n * Sometimes a node will get deleted and replaced deep in a recursion, but\n * further up it will still get registered.  This leads to a potential read\n * after free during the GC marking.  
This function completely clears out a\n * node so that this won't happen.\n */\nvoid nm_list_storage_completely_unregister_node(const NODE* curr) {\n  nm_completely_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));\n}\n\nvoid nm_list_storage_register_list(const LIST* list, size_t recursions) {\n  NODE* next;\n  if (!list) return;\n  NODE* curr = list->first;\n\n  while (curr != NULL) {\n    next = curr->next;\n    if (recursions == 0) {\n      nm_list_storage_register_node(curr);\n    } else {\n      nm_list_storage_register_list(reinterpret_cast<LIST*>(curr->val), recursions - 1);\n    }\n    curr = next;\n  }\n}\n\nvoid nm_list_storage_unregister_list(const LIST* list, size_t recursions) {\n  NODE* next;\n  if (!list) return;\n  NODE* curr = list->first;\n\n  while (curr != NULL) {\n    next = curr->next;\n    if (recursions == 0) {\n      nm_list_storage_unregister_node(curr);\n    } else {\n      nm_list_storage_unregister_list(reinterpret_cast<LIST*>(curr->val), recursions - 1);\n    }\n    curr = next;\n  }\n}\n\nvoid nm_list_storage_register(const STORAGE* s) {\n  const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);\n  if (storage && storage->dtype == nm::RUBYOBJ) {\n    nm_register_value(&*reinterpret_cast<VALUE*>(storage->default_val));\n    nm_list_storage_register_list(storage->rows, storage->dim - 1);\n  }\n}\n\nvoid nm_list_storage_unregister(const STORAGE* s) {\n  const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);\n  if (storage && storage->dtype == nm::RUBYOBJ) {\n    nm_unregister_value(&*reinterpret_cast<VALUE*>(storage->default_val));\n    nm_list_storage_unregister_list(storage->rows, storage->dim - 1);\n  }\n}\n\n///////////////\n// Accessors //\n///////////////\n\n/*\n * Documentation goes here.\n */\nstatic NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice) {\n  LIST* l = s->rows;\n  NODE* n;\n\n  for (size_t r = 0; r < s->dim; r++) {\n    n = nm::list::find(l, s->offset[r] + 
slice->coords[r]);\n\n    if (n) l = reinterpret_cast<LIST*>(n->val);\n    else return NULL;\n  }\n\n  return n;\n}\n\n\n/*\n * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.\n * Handles empty/non-existent sublists.\n */\nstatic void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t rec, VALUE& stack) {\n  VALUE empty  = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();\n  NM_CONSERVATIVE(nm_register_value(&stack));\n\n  if (rec) {\n    for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {\n      // Don't do an unshift/shift here -- we'll let that be handled in the lowest-level iteration (recursions == 0)\n      rb_ary_push(stack, LONG2NUM(index));\n      each_empty_with_indices_r(s, rec-1, stack);\n      rb_ary_pop(stack);\n    }\n  } else {\n    rb_ary_unshift(stack, empty);\n    for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {\n      rb_ary_push(stack, LONG2NUM(index));\n      rb_yield_splat(stack);\n      rb_ary_pop(stack);\n    }\n    rb_ary_shift(stack);\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&stack));\n}\n\n/*\n * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.\n */\nstatic void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {\n  if (s.dtype() == nm::RUBYOBJ)\n    nm_list_storage_register_list(l, rec);\n  NM_CONSERVATIVE(nm_register_value(&stack));\n  NODE*  curr  = l->first;\n\n  size_t offset = s.offset(rec);\n  size_t shape  = s.ref_shape(rec);\n\n  while (curr && curr->key < offset) curr = curr->next;\n  if (curr && curr->key - offset >= shape) curr = NULL;\n\n\n  if (rec) {\n    for (unsigned long index = 0; index < shape; ++index) { // index in reference\n      rb_ary_push(stack, LONG2NUM(index));\n      if (!curr || index < curr->key - offset) {\n        each_empty_with_indices_r(s, rec-1, stack);\n      } else { // index == curr->key - 
offset\n        each_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);\n        curr = curr->next;\n      }\n      rb_ary_pop(stack);\n    }\n  } else {\n    for (unsigned long index = 0; index < shape; ++index) {\n\n      rb_ary_push(stack, LONG2NUM(index));\n\n      if (!curr || index < curr->key - offset) {\n        rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj());\n\n      } else { // index == curr->key - offset\n        rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : nm::rubyobj_from_cval(curr->val, s.dtype()).rval);\n\n        curr = curr->next;\n      }\n      rb_yield_splat(stack);\n\n      rb_ary_shift(stack);\n      rb_ary_pop(stack);\n    }\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&stack));\n  if (s.dtype() == nm::RUBYOBJ)\n    nm_list_storage_unregister_list(l, rec);\n}\n\n\n/*\n * Recursive helper function for each_stored_with_indices, based on nm_list_storage_count_elements_r.\n */\nstatic void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {\n  if (s.dtype() == nm::RUBYOBJ)\n    nm_list_storage_register_list(l, rec);\n  NM_CONSERVATIVE(nm_register_value(&stack));\n\n  NODE* curr = l->first;\n\n  size_t offset = s.offset(rec);\n  size_t shape  = s.ref_shape(rec);\n\n  while (curr && curr->key < offset) { curr = curr->next; }\n  if (curr && curr->key - offset >= shape) curr = NULL;\n\n  if (rec) {\n    while (curr) {\n\n      rb_ary_push(stack, LONG2NUM(static_cast<long>(curr->key - offset)));\n      each_stored_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);\n      rb_ary_pop(stack);\n\n      curr = curr->next;\n      if (curr && curr->key - offset >= shape) curr = NULL;\n    }\n  } else {\n    while (curr) {\n      rb_ary_push(stack, LONG2NUM(static_cast<long>(curr->key - offset))); // add index to end\n\n      // add value to beginning\n      
rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : nm::rubyobj_from_cval(curr->val, s.dtype()).rval);\n      // yield to the whole stack (value, i, j, k, ...)\n      rb_yield_splat(stack);\n\n      // remove the value\n      rb_ary_shift(stack);\n\n      // remove the index from the end\n      rb_ary_pop(stack);\n\n      curr = curr->next;\n      if (curr && curr->key - offset >= shape) curr = NULL;\n    }\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&stack));\n  if (s.dtype() == nm::RUBYOBJ)\n    nm_list_storage_unregister_list(l, rec);\n}\n\n\n/*\n * Each/each-stored iterator, brings along the indices.\n */\nVALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {\n\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, 0);\n\n  nm::list_storage::RecurseData sdata(NM_STORAGE_LIST(nmatrix));\n\n  VALUE stack = rb_ary_new();\n\n  if (stored) each_stored_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);\n  else        each_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);\n\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return nmatrix;\n}\n\n\n/*\n * map merged stored iterator. 
Always returns a matrix containing RubyObjects\n * which probably needs to be casted.\n */\nVALUE nm_list_map_stored(VALUE left, VALUE init) {\n  NM_CONSERVATIVE(nm_register_value(&left));\n  NM_CONSERVATIVE(nm_register_value(&init));\n\n  LIST_STORAGE *s = NM_STORAGE_LIST(left);\n\n  // For each matrix, if it's a reference, we want to deal directly with the\n  // original (with appropriate offsetting)\n  nm::list_storage::RecurseData sdata(s);\n\n  //if (!rb_block_given_p()) {\n  //  rb_raise(rb_eNotImpError, \"RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created\");\n  //}\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&left));\n  NM_CONSERVATIVE(nm_unregister_value(&init));\n  RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.\n\n  // Figure out default value if none provided by the user\n  if (init == Qnil) {\n    nm_unregister_value(&init);\n    init = rb_yield_values(1, sdata.init_obj());\n    nm_register_value(&init);\n  }\n  // Allocate a new shape array for the resulting matrix.\n  void* init_val = NM_ALLOC(VALUE);\n  memcpy(init_val, &init, sizeof(VALUE));\n  nm_register_value(&*reinterpret_cast<VALUE*>(init_val));\n\n  NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));\n  LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);\n  nm::list_storage::RecurseData rdata(r, init);\n  nm_register_nmatrix(result);\n  map_stored_r(rdata, sdata, rdata.top_level_list(), sdata.top_level_list(), sdata.dim() - 1);\n\n  VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);\n\n  nm_unregister_nmatrix(result);\n  nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));\n  NM_CONSERVATIVE(nm_unregister_value(&init));\n  NM_CONSERVATIVE(nm_unregister_value(&left));\n\n  return 
to_return;\n}\n\n\n/*\n * map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.\n */\nVALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {\n  NM_CONSERVATIVE(nm_register_value(&left));\n  NM_CONSERVATIVE(nm_register_value(&right));\n  NM_CONSERVATIVE(nm_register_value(&init));\n\n  bool scalar = false;\n\n  LIST_STORAGE *s   = NM_STORAGE_LIST(left),\n               *t;\n\n  // For each matrix, if it's a reference, we want to deal directly with the original (with appropriate offsetting)\n  nm::list_storage::RecurseData sdata(s);\n\n  void* scalar_init = NULL;\n\n  // right might be a scalar, in which case this is a scalar operation.\n  if (!IsNMatrixType(right)) {\n    nm::dtype_t r_dtype = Upcast[NM_DTYPE(left)][nm_dtype_min(right)];\n    scalar_init         = rubyobj_to_cval(right, r_dtype); // make a copy of right\n\n    t                   = reinterpret_cast<LIST_STORAGE*>(nm_list_storage_create(r_dtype, sdata.copy_alloc_shape(), s->dim, scalar_init));\n    scalar              = true;\n  } else {\n    t                   = NM_STORAGE_LIST(right); // element-wise, not scalar.\n  }\n\n  //if (!rb_block_given_p()) {\n  //  rb_raise(rb_eNotImpError, \"RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created\");\n  //}\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&left));\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n  NM_CONSERVATIVE(nm_unregister_value(&init));\n  RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. 
Enable above code instead.\n\n  // Figure out default value if none provided by the user\n  nm::list_storage::RecurseData& tdata = *(new nm::list_storage::RecurseData(t)); //FIXME: this is a hack to make sure that we can run the destructor before nm_list_storage_delete(t) below.\n  if (init == Qnil) {\n    nm_unregister_value(&init);\n    init = rb_yield_values(2, sdata.init_obj(), tdata.init_obj());\n    nm_register_value(&init);\n  }\n\n  // Allocate a new shape array for the resulting matrix.\n  void* init_val = NM_ALLOC(VALUE);\n  memcpy(init_val, &init, sizeof(VALUE));\n  nm_register_value(&*reinterpret_cast<VALUE*>(init_val));\n\n  NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));\n  LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);\n  nm::list_storage::RecurseData rdata(r, init);\n  map_merged_stored_r(rdata, sdata, tdata, rdata.top_level_list(), sdata.top_level_list(), tdata.top_level_list(), sdata.dim() - 1);\n\n  delete &tdata;\n  // If we are working with a scalar operation\n  if (scalar) nm_list_storage_delete(t);\n\n  VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);\n\n  nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));\n\n  NM_CONSERVATIVE(nm_unregister_value(&init));\n  NM_CONSERVATIVE(nm_unregister_value(&right));\n  NM_CONSERVATIVE(nm_unregister_value(&left));\n\n  return to_return;\n}\n\n\n/*\n * Copy a slice of a list matrix into a regular list matrix.\n */\nstatic LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords, size_t* lengths, size_t n) {\n  nm_list_storage_register(src);\n  void *val = NULL;\n  int key;\n\n  LIST* dst_rows = nm::list::create();\n  NODE* src_node = src_rows->first;\n  std::list<VALUE*> temp_vals;\n  std::list<LIST*> temp_lists;\n  while (src_node) {\n    key = src_node->key - (src->offset[n] + coords[n]);\n\n    if (key >= 0 && (size_t)key < lengths[n]) {\n      if 
(src->dim - n > 1) {\n        val = slice_copy( src,\n                          reinterpret_cast<LIST*>(src_node->val),\n                          coords,\n                          lengths,\n                          n + 1    );\n        if (val) {\n          if (src->dtype == nm::RUBYOBJ) {\n            nm_list_storage_register_list(reinterpret_cast<LIST*>(val), src->dim - n - 2);\n            temp_lists.push_front(reinterpret_cast<LIST*>(val));\n          }\n          nm::list::insert_copy(dst_rows, false, key, val, sizeof(LIST));\n        }\n      } else { // matches src->dim - n > 1\n        if (src->dtype == nm::RUBYOBJ) {\n          nm_register_value(&*reinterpret_cast<VALUE*>(src_node->val));\n          temp_vals.push_front(reinterpret_cast<VALUE*>(src_node->val));\n        }\n        nm::list::insert_copy(dst_rows, false, key, src_node->val, DTYPE_SIZES[src->dtype]);\n      }\n    }\n    src_node = src_node->next;\n }\n  if (src->dtype == nm::RUBYOBJ) {\n    __nm_list_storage_unregister_temp_list_list(temp_lists, src->dim - n - 2);\n    __nm_list_storage_unregister_temp_value_list(temp_vals);\n  }\n  nm_list_storage_unregister(src);\n  return dst_rows;\n}\n\n/*\n * Documentation goes here.\n */\nvoid* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {\n  LIST_STORAGE* s = (LIST_STORAGE*)storage;\n  LIST_STORAGE* ns = NULL;\n\n  nm_list_storage_register(s);\n\n  if (slice->single) {\n    NODE* n = list_storage_get_single_node(s, slice);\n    nm_list_storage_unregister(s);\n    return (n ? 
n->val : s->default_val);\n  } else {\n    void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype]);\n    memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);\n    if (s->dtype == nm::RUBYOBJ)\n      nm_register_value(&*reinterpret_cast<VALUE*>(init_val));\n\n    size_t *shape = NM_ALLOC_N(size_t, s->dim);\n    memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);\n\n    ns = nm_list_storage_create(s->dtype, shape, s->dim, init_val);\n\n    ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);\n\n    if (s->dtype == nm::RUBYOBJ) {\n      nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));\n    }\n\n    nm_list_storage_unregister(s);\n\n    return ns;\n  }\n}\n\n/*\n * Get the contents of some set of coordinates. Note: Does not make a copy!\n * Don't free!\n */\nvoid* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {\n  LIST_STORAGE* s = (LIST_STORAGE*)storage;\n  LIST_STORAGE* ns = NULL;\n  nm_list_storage_register(s);\n\n  //TODO: It needs a refactoring.\n  if (slice->single) {\n    NODE* n = list_storage_get_single_node(s, slice);\n    nm_list_storage_unregister(s);\n    return (n ? 
n->val : s->default_val);\n  } else {\n    ns = NM_ALLOC( LIST_STORAGE );\n\n    ns->dim = s->dim;\n    ns->dtype = s->dtype;\n    ns->offset = NM_ALLOC_N(size_t, ns->dim);\n    ns->shape = NM_ALLOC_N(size_t, ns->dim);\n\n    for (size_t i = 0; i < ns->dim; ++i) {\n      ns->offset[i] = slice->coords[i] + s->offset[i];\n      ns->shape[i]  = slice->lengths[i];\n    }\n\n    ns->rows        = s->rows;\n    ns->default_val = s->default_val;\n\n    s->src->count++;\n    ns->src         = s->src;\n    nm_list_storage_unregister(s);\n    return ns;\n  }\n}\n\n\n/*\n * Recursive function, sets multiple values in a matrix from a single source value.\n */\nstatic void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n) {\n  nm_list_storage_register(dest);\n  if (dest->dtype == nm::RUBYOBJ) {\n    nm_register_value(&*reinterpret_cast<VALUE*>(val));\n    nm_list_storage_register_list(l, dest->dim - n - 1);\n  }\n\n  // drill down into the structure\n  NODE* node = NULL;\n  if (dest->dim - n > 1) {\n    std::list<LIST*> temp_nodes;\n    for (size_t i = 0; i < lengths[n]; ++i) {\n\n      size_t key = i + dest->offset[n] + coords[n];\n\n      if (!node) {\n        // try to insert list\n        node = nm::list::insert(l, false, key, nm::list::create());\n      } else if (!node->next || (node->next && node->next->key > key)) {\n        node = nm::list::insert_after(node, key, nm::list::create());\n      } else {\n        node = node->next; // correct rank already exists.\n      }\n\n      if (dest->dtype == nm::RUBYOBJ) {\n        temp_nodes.push_front(reinterpret_cast<LIST*>(node->val));\n        nm_list_storage_register_list(reinterpret_cast<LIST*>(node->val), dest->dim - n - 2);\n      }\n\n      // cast it to a list and recurse\n      slice_set_single(dest, reinterpret_cast<LIST*>(node->val), val, coords, lengths, n + 1);\n    }\n    __nm_list_storage_unregister_temp_list_list(temp_nodes, dest->dim - n - 2);\n  } else {\n    
std::list<VALUE*> temp_vals;\n    for (size_t i = 0; i < lengths[n]; ++i) {\n\n      size_t key = i + dest->offset[n] + coords[n];\n\n      if (!node)  {\n        node = nm::list::insert_copy(l, true, key, val, DTYPE_SIZES[dest->dtype]);\n      } else {\n        node = nm::list::replace_insert_after(node, key, val, true, DTYPE_SIZES[dest->dtype]);\n      }\n      if (dest->dtype == nm::RUBYOBJ) {\n        temp_vals.push_front(reinterpret_cast<VALUE*>(node->val));\n        nm_register_value(&*reinterpret_cast<VALUE*>(node->val));\n      }\n    }\n    __nm_list_storage_unregister_temp_value_list(temp_vals);\n  }\n\n  nm_list_storage_unregister(dest);\n  if (dest->dtype == nm::RUBYOBJ) {\n    nm_unregister_value(&*reinterpret_cast<VALUE*>(val));\n    nm_list_storage_unregister_list(l, dest->dim - n - 1);\n  }\n}\n\n\n\n/*\n * Set a value or values in a list matrix.\n */\nvoid nm_list_storage_set(VALUE left, SLICE* slice, VALUE right) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::set, void, VALUE, SLICE*, VALUE)\n  ttable[NM_DTYPE(left)](left, slice, right);\n}\n\n\n/*\n * Insert an entry directly in a row (not using copy! 
don't free after).\n *\n * Returns a pointer to the insertion location.\n *\n * TODO: Allow this function to accept an entire row and not just one value -- for slicing\n */\nNODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {\n  LIST_STORAGE* s = (LIST_STORAGE*)storage;\n  nm_list_storage_register(s);\n  if (s->dtype == nm::RUBYOBJ)\n    nm_register_value(&*reinterpret_cast<VALUE*>(val));\n  // Pretend dims = 2\n  // Then coords is going to be size 2\n  // So we need to find out if some key already exists\n  size_t r;\n  NODE*  n;\n  LIST*  l = s->rows;\n\n  // drill down into the structure\n  for (r = 0; r < s->dim -1; ++r) {\n    n = nm::list::insert(l, false, s->offset[r] + slice->coords[s->dim - r], nm::list::create());\n    l = reinterpret_cast<LIST*>(n->val);\n  }\n\n  nm_list_storage_unregister(s);\n  if (s->dtype == nm::RUBYOBJ)\n    nm_unregister_value(&*reinterpret_cast<VALUE*>(val));\n\n  return nm::list::insert(l, true, s->offset[r] + slice->coords[r], val);\n}\n\n/*\n * Remove an item or slice from list storage.\n */\nvoid nm_list_storage_remove(STORAGE* storage, SLICE* slice) {\n  LIST_STORAGE* s = (LIST_STORAGE*)storage;\n\n  // This returns a boolean, which will indicate whether s->rows is empty.\n  // We can safely ignore it, since we never want to delete s->rows until\n  // it's time to destroy the LIST_STORAGE object.\n  nm::list::remove_recursive(s->rows, slice->coords, s->offset, slice->lengths, 0, s->dim);\n}\n\n///////////\n// Tests //\n///////////\n\n/*\n * Comparison of contents for list storage.\n */\nbool nm_list_storage_eqeq(const STORAGE* left, const STORAGE* right) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::eqeq_r, bool, nm::list_storage::RecurseData& left, nm::list_storage::RecurseData& right, const LIST* l, const LIST* r, size_t rec)\n\n  nm::list_storage::RecurseData ldata(reinterpret_cast<const LIST_STORAGE*>(left)),\n                                rdata(reinterpret_cast<const 
LIST_STORAGE*>(right));\n\n  return ttable[left->dtype][right->dtype](ldata, rdata, ldata.top_level_list(), rdata.top_level_list(), ldata.dim()-1);\n}\n\n//////////\n// Math //\n//////////\n\n\n/*\n * List storage matrix multiplication.\n */\nSTORAGE* nm_list_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {\n  free(resulting_shape);\n  rb_raise(rb_eNotImpError, \"multiplication not implemented for list-of-list matrices\");\n  return NULL;\n  //DTYPE_TEMPLATE_TABLE(dense_storage::matrix_multiply, NMATRIX*, STORAGE_PAIR, size_t*, bool);\n\n  //return ttable[reinterpret_cast<DENSE_STORAGE*>(casted_storage.left)->dtype](casted_storage, resulting_shape, vector);\n}\n\n\n/*\n * List storage to Hash conversion. Uses Hashes with default values, so you can continue to pretend\n * it's a sparse matrix.\n */\nVALUE nm_list_storage_to_hash(const LIST_STORAGE* s, const nm::dtype_t dtype) {\n  nm_list_storage_register(s);\n  // Get the default value for the list storage.\n  VALUE default_value = nm::rubyobj_from_cval(s->default_val, dtype).rval;\n  nm_list_storage_unregister(s);\n  // Recursively copy each dimension of the matrix into a nested hash.\n  return nm_list_copy_to_hash(s->rows, dtype, s->dim - 1, default_value);\n}\n\n/////////////\n// Utility //\n/////////////\n\n/*\n * Recursively count the non-zero elements in a list storage object.\n */\nsize_t nm_list_storage_count_elements_r(const LIST* l, size_t recursions) {\n  size_t count = 0;\n  NODE* curr = l->first;\n\n  if (recursions) {\n    while (curr) {\n      count += nm_list_storage_count_elements_r(reinterpret_cast<const LIST*>(curr->val), recursions - 1);\n      curr   = curr->next;\n    }\n\n  } else {\n    while (curr) {\n      ++count;\n      curr = curr->next;\n    }\n  }\n\n  return count;\n}\n\n/*\n * Count non-diagonal non-zero elements.\n */\nsize_t nm_list_storage_count_nd_elements(const LIST_STORAGE* s) {\n  NODE *i_curr, *j_curr;\n  size_t count = 
0;\n\n  if (s->dim != 2) {\n    rb_raise(rb_eNotImpError, \"non-diagonal element counting only defined for dim = 2\");\n  }\n\n  for (i_curr = s->rows->first; i_curr; i_curr = i_curr->next) {\n    int i = i_curr->key - s->offset[0];\n    if (i < 0 || i >= (int)s->shape[0]) continue;\n\n    for (j_curr = ((LIST*)(i_curr->val))->first; j_curr; j_curr = j_curr->next) {\n      int j = j_curr->key - s->offset[1];\n      if (j < 0 || j >= (int)s->shape[1]) continue;\n\n      if (i != j)    ++count;\n    }\n  }\n\n  return count;\n}\n\n/////////////////////////\n// Copying and Casting //\n/////////////////////////\n//\n/*\n * List storage copy constructor C access.\n */\n\nLIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs) {\n  nm_list_storage_register(rhs);\n  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);\n  memcpy(shape, rhs->shape, sizeof(size_t) * rhs->dim);\n\n  void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[rhs->dtype]);\n  memcpy(init_val, rhs->default_val, DTYPE_SIZES[rhs->dtype]);\n\n  LIST_STORAGE* lhs = nm_list_storage_create(rhs->dtype, shape, rhs->dim, init_val);\n  nm_list_storage_register(lhs);\n\n  lhs->rows = slice_copy(rhs, rhs->rows, lhs->offset, lhs->shape, 0);\n\n  nm_list_storage_unregister(rhs);\n  nm_list_storage_unregister(lhs);\n  return lhs;\n}\n\n/*\n * List storage copy constructor C access with casting.\n */\nSTORAGE* nm_list_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void* dummy) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::cast_copy, LIST_STORAGE*, const LIST_STORAGE* rhs, nm::dtype_t new_dtype);\n\n  return (STORAGE*)ttable[new_dtype][rhs->dtype]((LIST_STORAGE*)rhs, new_dtype);\n}\n\n\n/*\n * List storage copy constructor for transposing.\n */\nSTORAGE* nm_list_storage_copy_transposed(const STORAGE* rhs_base) {\n  rb_raise(rb_eNotImpError, \"list storage transpose not yet implemented\");\n  return NULL;\n}\n\n\n} // end of extern \"C\" block\n\n\n/////////////////////////\n// Templated Functions 
//\n/////////////////////////\n\n\nnamespace nm {\nnamespace list_storage {\n\n\n/*\n * List storage copy constructor for changing dtypes.\n */\ntemplate <typename LDType, typename RDType>\nstatic LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype) {\n  nm_list_storage_register(rhs);\n  // allocate and copy shape\n  size_t* shape = NM_ALLOC_N(size_t, rhs->dim);\n  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));\n\n  // copy default value\n  LDType* default_val = NM_ALLOC_N(LDType, 1);\n  *default_val = *reinterpret_cast<RDType*>(rhs->default_val);\n\n  LIST_STORAGE* lhs = nm_list_storage_create(new_dtype, shape, rhs->dim, default_val);\n  //lhs->rows         = nm::list::create();\n\n  nm_list_storage_register(lhs);\n  // TODO: Needs optimization. When matrix is reference it is copped twice.\n  if (rhs->src == rhs)\n    nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, rhs->rows, rhs->dim - 1);\n  else {\n    LIST_STORAGE *tmp = nm_list_storage_copy(rhs);\n    nm_list_storage_register(tmp);\n    nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, tmp->rows, rhs->dim - 1);\n    nm_list_storage_unregister(tmp);\n    nm_list_storage_delete(tmp);\n  }\n  nm_list_storage_unregister(lhs);\n  nm_list_storage_unregister(rhs);\n  return lhs;\n}\n\n\n/*\n * Recursive helper function for eqeq. Note that we use SDType and TDType instead of L and R because this function\n * is a re-labeling. That is, it can be called in order L,R or order R,L; and we don't want to get confused. 
So we\n * use S and T to denote first and second passed in.\n */\ntemplate <typename SDType, typename TDType>\nstatic bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init) {\n  NODE* curr  = l->first;\n\n  // For reference matrices, make sure we start in the correct place.\n  while (curr && curr->key < s.offset(rec)) {  curr = curr->next;  }\n  if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;\n\n  if (rec) {\n    while (curr) {\n      if (!eqeq_empty_r<SDType,TDType>(s, reinterpret_cast<const LIST*>(curr->val), rec-1, t_init)) return false;\n      curr = curr->next;\n\n      if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;\n    }\n  } else {\n    while (curr) {\n      if (*reinterpret_cast<SDType*>(curr->val) != *t_init) return false;\n      curr = curr->next;\n\n      if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;\n    }\n  }\n  return true;\n}\n\n\n\n/*\n * Do these two list matrices of the same dtype have exactly the same contents (accounting for default_vals)?\n *\n * This function is recursive.\n */\ntemplate <typename LDType, typename RDType>\nstatic bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const LIST* r, size_t rec) {\n  NODE *lcurr = l->first,\n       *rcurr = r->first;\n\n  // For reference matrices, make sure we start in the correct place.\n  while (lcurr && lcurr->key < left.offset(rec)) {  lcurr = lcurr->next;  }\n  while (rcurr && rcurr->key < right.offset(rec)) {  rcurr = rcurr->next;  }\n  if (rcurr && rcurr->key - right.offset(rec) >= left.ref_shape(rec)) rcurr = NULL;\n  if (lcurr && lcurr->key - left.offset(rec) >= left.ref_shape(rec)) lcurr = NULL;\n\n  bool compared = false;\n\n  if (rec) {\n\n    while (lcurr || rcurr) {\n\n      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {\n        if (!eqeq_empty_r<LDType,RDType>(left, reinterpret_cast<const LIST*>(lcurr->val), rec-1, 
reinterpret_cast<const RDType*>(right.init()))) return false;\n        lcurr   = lcurr->next;\n      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {\n        if (!eqeq_empty_r<RDType,LDType>(right, reinterpret_cast<const LIST*>(rcurr->val), rec-1, reinterpret_cast<const LDType*>(left.init()))) return false;\n        rcurr   = rcurr->next;\n      } else { // keys are == and both present\n        if (!eqeq_r<LDType,RDType>(left, right, reinterpret_cast<const LIST*>(lcurr->val), reinterpret_cast<const LIST*>(rcurr->val), rec-1)) return false;\n        lcurr   = lcurr->next;\n        rcurr   = rcurr->next;\n      }\n      if (rcurr && rcurr->key - right.offset(rec) >= right.ref_shape(rec)) rcurr = NULL;\n      if (lcurr && lcurr->key - left.offset(rec)  >= left.ref_shape(rec)) lcurr = NULL;\n      compared = true;\n    }\n  } else {\n    while (lcurr || rcurr) {\n\n      if (rcurr && rcurr->key - right.offset(rec) >= left.ref_shape(rec)) rcurr = NULL;\n      if (lcurr && lcurr->key - left.offset(rec) >= left.ref_shape(rec)) lcurr = NULL;\n\n      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {\n        if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<const RDType*>(right.init())) return false;\n        lcurr         = lcurr->next;\n      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {\n        if (*reinterpret_cast<RDType*>(rcurr->val) != *reinterpret_cast<const LDType*>(left.init())) return false;\n        rcurr         = rcurr->next;\n      } else { // keys == and both left and right nodes present\n        if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<RDType*>(rcurr->val)) return false;\n        lcurr         = lcurr->next;\n        rcurr         = rcurr->next;\n      }\n      if (rcurr && rcurr->key - right.offset(rec) >= right.ref_shape(rec)) rcurr = NULL;\n      if (lcurr && lcurr->key 
- left.offset(rec)  >= left.ref_shape(rec)) lcurr = NULL;\n      compared = true;\n    }\n  }\n\n  // Final condition: both containers are empty, and have different default values.\n  if (!compared && !lcurr && !rcurr) return *reinterpret_cast<const LDType*>(left.init()) == *reinterpret_cast<const RDType*>(right.init());\n  return true;\n}\n\n\n}} // end of namespace nm::list_storage\n\nextern \"C\" {\n  /*\n   * call-seq:\n   *     __list_to_hash__ -> Hash\n   *\n   * Create a Ruby Hash from a list NMatrix.\n   *\n   * This is an internal C function which handles list stype only.\n   */\n  VALUE nm_to_hash(VALUE self) {\n    return nm_list_storage_to_hash(NM_STORAGE_LIST(self), NM_DTYPE(self));\n  }\n\n  /*\n   * call-seq:\n   *     __list_default_value__ -> ...\n   *\n   * Get the default_value property from a list matrix.\n   */\n  VALUE nm_list_default_value(VALUE self) {\n    NM_CONSERVATIVE(nm_register_value(&self));\n    VALUE to_return = (NM_DTYPE(self) == nm::RUBYOBJ) ? *reinterpret_cast<VALUE*>(NM_DEFAULT_VAL(self)) : nm::rubyobj_from_cval(NM_DEFAULT_VAL(self), NM_DTYPE(self)).rval;\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    return to_return;\n  }\n} // end of extern \"C\" block\n"
  },
  {
    "path": "ext/nmatrix/storage/list/list.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == list.h\n//\n// List-of-lists n-dimensional matrix storage. Uses singly-linked\n// lists.\n\n#ifndef LIST_H\n#define LIST_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <cstdlib>\n#include <list>\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n#include \"data/data.h\"\n#include \"../common.h\"\n#include \"util/sl_list.h\"\n#include \"nmatrix.h\"\n\n/*\n * Macros\n */\n\n/*\n * Types\n */\n\n\n\n/*\n * Data\n */\n \nextern \"C\" {\n\n  /*\n   * Functions\n   */\n\n  ////////////////\n  // Lifecycle //\n  ///////////////\n\n  LIST_STORAGE*  nm_list_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* init_val);\n  void          nm_list_storage_delete(STORAGE* s);\n  void          nm_list_storage_delete_ref(STORAGE* s);\n  void          nm_list_storage_mark(STORAGE*);\n  void          nm_list_storage_register(const STORAGE* s);\n  void          nm_list_storage_unregister(const STORAGE* s);\n  void          nm_list_storage_register_list(const LIST* l, size_t recursions);\n  void          nm_list_storage_unregister_list(const LIST* l, size_t recursions);\n  void          nm_list_storage_register_node(const NODE* n);\n  void          
nm_list_storage_unregister_node(const NODE* n);\n  void          nm_list_storage_completely_unregister_node(const NODE* curr);\n  ///////////////\n  // Accessors //\n  ///////////////\n\n  VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored);\n  void* nm_list_storage_ref(const STORAGE* s, SLICE* slice);\n  void* nm_list_storage_get(const STORAGE* s, SLICE* slice);\n  NODE* nm_list_storage_insert(STORAGE* s, SLICE* slice, void* val);\n  void  nm_list_storage_set(VALUE left, SLICE* slice, VALUE right);\n  void  nm_list_storage_remove(STORAGE* s, SLICE* slice);\n\n  ///////////\n  // Tests //\n  ///////////\n\n  bool nm_list_storage_eqeq(const STORAGE* left, const STORAGE* right);\n\n  //////////\n  // Math //\n  //////////\n\n  STORAGE* nm_list_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);\n\n\n  /////////////\n  // Utility //\n  /////////////\n\n  size_t nm_list_storage_count_elements_r(const LIST* l, size_t recursions);\n  size_t nm_list_storage_count_nd_elements(const LIST_STORAGE* s);\n\n  /*\n   * Count non-zero elements. See also count_list_storage_nd_elements.\n   */\n  inline size_t nm_list_storage_count_elements(const LIST_STORAGE* s) {\n    return nm_list_storage_count_elements_r(s->rows, s->dim - 1);\n  }\n\n  /////////////////////////\n  // Copying and Casting //\n  /////////////////////////\n\n  LIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs);\n  STORAGE*      nm_list_storage_copy_transposed(const STORAGE* rhs_base);\n  STORAGE*      nm_list_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void*);\n  VALUE         nm_list_storage_to_hash(const LIST_STORAGE* s, const nm::dtype_t dtype);\n\n  // Exposed functions\n  VALUE nm_to_hash(VALUE self);\n  VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init);\n  VALUE nm_list_map_stored(VALUE left, VALUE init);\n  VALUE nm_list_default_value(VALUE self);\n} // end of extern \"C\" block\n\n#endif // LIST_H\n"
  },
  {
    "path": "ext/nmatrix/storage/storage.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == storage.cpp\n//\n// Code that is used by or involves more then one storage type.\n\n/*\n * Standard Includes\n */\n\n/*\n * Project Includes\n */\n\n#include \"data/data.h\"\n\n#include \"storage.h\"\n\n#include \"common.h\"\n\n/*\n * Macros\n */\n\n/*\n * Global Variables\n */\n\nextern \"C\" {\n\nconst char* const STYPE_NAMES[nm::NUM_STYPES] = {\n  \"dense\",\n  \"list\",\n  \"yale\"\n};\n\n} // end extern \"C\" block\n\n/*\n * Forward Declarations\n */\n\nnamespace nm {\n\n\n/*\n * Functions\n */\n\n/////////////////////////\n// Templated Functions //\n/////////////////////////\n\nnamespace dense_storage {\n\ntemplate <typename LDType, typename RDType>\nstatic void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* default_val,\n  size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions);\n\ntemplate <typename LDType, typename RDType>\nstatic void cast_copy_list_default(LDType* lhs, RDType* default_val, size_t& pos,\n  const size_t* shape, size_t dim, size_t max_elements, size_t recursions);\n\n/*\n * Convert (by creating a copy) from list storage to dense storage.\n */\ntemplate <typename LDType, typename RDType>\nDENSE_STORAGE* 
create_from_list_storage(const LIST_STORAGE* rhs, dtype_t l_dtype) {\n  nm_list_storage_register(rhs);\n  // allocate and copy shape\n  size_t* shape = NM_ALLOC_N(size_t, rhs->dim);\n  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));\n\n  DENSE_STORAGE* lhs = nm_dense_storage_create(l_dtype, shape, rhs->dim, NULL, 0);\n\n  // Position in lhs->elements.\n  size_t pos = 0;\n  size_t max_elements = nm_storage_count_max_elements(rhs);\n\n//static void dense_storage_cast_copy_list_contents_template(LDType* lhs, const LIST* rhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions)\n  // recursively copy the contents\n  if (rhs->src == rhs)\n    cast_copy_list_contents<LDType,RDType>(reinterpret_cast<LDType*>(lhs->elements),\n                                         rhs->rows,\n                                         reinterpret_cast<RDType*>(rhs->default_val),\n                                         pos, shape, lhs->dim, max_elements, rhs->dim-1);\n  else {\n    LIST_STORAGE *tmp = nm_list_storage_copy(rhs);\n    cast_copy_list_contents<LDType,RDType>(reinterpret_cast<LDType*>(lhs->elements),\n                                         tmp->rows,\n                                         reinterpret_cast<RDType*>(tmp->default_val),\n                                         pos, shape, lhs->dim, max_elements, tmp->dim-1);\n    nm_list_storage_delete(tmp);\n\n  }\n  nm_list_storage_unregister(rhs);\n\n  return lhs;\n}\n\n\n\n\n/*\n * Create/allocate dense storage, copying into it the contents of a Yale matrix.\n */\ntemplate <typename LDType, typename RDType>\nDENSE_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype) {\n\n  nm_yale_storage_register(rhs);\n  // Position in rhs->elements.\n  IType*  rhs_ija = reinterpret_cast<YALE_STORAGE*>(rhs->src)->ija;\n  RDType* rhs_a   = reinterpret_cast<RDType*>(reinterpret_cast<YALE_STORAGE*>(rhs->src)->a);\n\n  // Allocate and set shape.\n  size_t* 
shape = NM_ALLOC_N(size_t, rhs->dim);\n  shape[0] = rhs->shape[0];\n  shape[1] = rhs->shape[1];\n\n  DENSE_STORAGE* lhs = nm_dense_storage_create(l_dtype, shape, rhs->dim, NULL, 0);\n  LDType* lhs_elements = reinterpret_cast<LDType*>(lhs->elements);\n\n  // Position in dense to write to.\n  size_t pos = 0;\n\n  LDType LCAST_ZERO = rhs_a[rhs->src->shape[0]];\n\n  // Walk through rows. For each entry we set in dense, increment pos.\n  for (size_t i = 0; i < shape[0]; ++i) {\n    IType ri = i + rhs->offset[0];\n\n    if (rhs_ija[ri] == rhs_ija[ri+1]) { // Check boundaries of row: is row empty? (Yes.)\n\n      // Write zeros in each column.\n      for (size_t j = 0; j < shape[1]; ++j) { // Move to next dense position.\n\n        // Fill in zeros and copy the diagonal entry for this empty row.\n        if (ri == j + rhs->offset[1]) lhs_elements[pos] = static_cast<LDType>(rhs_a[ri]);\n        else                          lhs_elements[pos] = LCAST_ZERO;\n\n        ++pos;\n      }\n\n    } else {  // Row contains entries: write those in each column, interspersed with zeros.\n\n      // Get the first ija position of the row (as sliced)\n      IType ija = nm::yale_storage::binary_search_left_boundary(rhs, rhs_ija[ri], rhs_ija[ri+1]-1, rhs->offset[1]);\n\n      // What column is it?\n      IType next_stored_rj = rhs_ija[ija];\n\n      for (size_t j = 0; j < shape[1]; ++j) {\n        IType rj = j + rhs->offset[1];\n\n        if (rj == ri) { // at a diagonal in RHS\n          lhs_elements[pos] = static_cast<LDType>(rhs_a[ri]);\n\n        } else if (rj == next_stored_rj) { // column ID was found in RHS\n          lhs_elements[pos] = static_cast<LDType>(rhs_a[ija]); // Copy from rhs.\n\n          // Get next.\n          ++ija;\n\n          // Increment to next column ID (or go off the end).\n          if (ija < rhs_ija[ri+1]) next_stored_rj = rhs_ija[ija];\n          else                      next_stored_rj = rhs->src->shape[1];\n\n        } else { // rj < next_stored_rj\n\n     
     // Insert zero.\n          lhs_elements[pos] = LCAST_ZERO;\n        }\n\n        // Move to next dense position.\n        ++pos;\n      }\n    }\n  }\n  nm_yale_storage_unregister(rhs);\n\n  return lhs;\n}\n\n\n/*\n * Copy list contents into dense recursively.\n */\ntemplate <typename LDType, typename RDType>\nstatic void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions) {\n\n  NODE *curr = rhs->first;\n  int last_key = -1;\n\n  nm_list_storage_register_list(rhs, recursions);\n\n  for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {\n\n    if (!curr || (curr->key > (size_t)(last_key+1))) {\n\n      if (recursions == 0)  lhs[pos] = static_cast<LDType>(*default_val);\n      else                   cast_copy_list_default<LDType,RDType>(lhs, default_val, pos, shape, dim, max_elements, recursions-1);\n\n      ++last_key;\n\n    } else {\n\n      if (recursions == 0)  lhs[pos] = static_cast<LDType>(*reinterpret_cast<RDType*>(curr->val));\n      else                  cast_copy_list_contents<LDType,RDType>(lhs, (const LIST*)(curr->val),\n                                                                                         default_val, pos, shape, dim, max_elements, recursions-1);\n\n      last_key = curr->key;\n      curr     = curr->next;\n    }\n  }\n\n  nm_list_storage_unregister_list(rhs, recursions);\n\n  --pos;\n}\n\n/*\n * Copy a set of default values into dense.\n */\ntemplate <typename LDType,typename RDType>\nstatic void cast_copy_list_default(LDType* lhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions) {\n  for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {\n\n    if (recursions == 0)    lhs[pos] = static_cast<LDType>(*default_val);\n    else                    cast_copy_list_default<LDType,RDType>(lhs, default_val, pos, shape, dim, max_elements, 
recursions-1);\n\n  }\n\n  --pos;\n}\n\n\n} // end of namespace dense_storage\n\nnamespace list_storage {\n\n\ntemplate <typename LDType, typename RDType>\nstatic bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero, size_t& pos, size_t* coords, const size_t* shape, size_t dim, size_t recursions);\n\n/*\n * Creation of list storage from dense storage.\n */\ntemplate <typename LDType, typename RDType>\nLIST_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtype, void* init) {\n  nm_dense_storage_register(rhs);\n\n  LDType* l_default_val = NM_ALLOC_N(LDType, 1);\n  RDType* r_default_val = NM_ALLOCA_N(RDType, 1); // clean up when finished with this function\n\n  // allocate and copy shape and coords\n  size_t *shape  = NM_ALLOC_N(size_t, rhs->dim),\n         *coords = NM_ALLOC_N(size_t, rhs->dim);\n\n  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));\n  memset(coords, 0, rhs->dim * sizeof(size_t));\n\n  // set list default_val to 0\n  if (init) *l_default_val = *reinterpret_cast<LDType*>(init);\n  else {\n    if (l_dtype == RUBYOBJ)    *l_default_val = INT2FIX(0);\n    else                      *l_default_val = 0;\n  }\n\n  // need test default value for comparing to elements in dense matrix\n  if (rhs->dtype == l_dtype || rhs->dtype != RUBYOBJ) *r_default_val = static_cast<RDType>(*l_default_val);\n  else                                                *r_default_val = nm::rubyobj_from_cval(l_default_val, l_dtype);\n\n\n  LIST_STORAGE* lhs = nm_list_storage_create(l_dtype, shape, rhs->dim, l_default_val);\n\n  nm_list_storage_register(lhs);\n\n  size_t pos = 0;\n\n  if (rhs->src == rhs)\n    list_storage::cast_copy_contents_dense<LDType,RDType>(lhs->rows,\n                                                          reinterpret_cast<const RDType*>(rhs->elements),\n                                                        r_default_val,\n                                                        pos, coords, rhs->shape, rhs->dim, 
rhs->dim - 1);\n  else {\n    DENSE_STORAGE* tmp = nm_dense_storage_copy(rhs);\n    list_storage::cast_copy_contents_dense<LDType,RDType>(lhs->rows,\n                                                          reinterpret_cast<const RDType*>(tmp->elements),\n                                                        r_default_val,\n                                                        pos, coords, rhs->shape, rhs->dim, rhs->dim - 1);\n\n    nm_dense_storage_delete(tmp);\n  }\n\n  nm_list_storage_unregister(lhs);\n  nm_dense_storage_unregister(rhs);\n\n  return lhs;\n}\n\n\n\n/*\n * Creation of list storage from yale storage.\n */\ntemplate <typename LDType, typename RDType>\nLIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype) {\n  // allocate and copy shape\n  nm_yale_storage_register(rhs);\n\n  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);\n  shape[0] = rhs->shape[0]; shape[1] = rhs->shape[1];\n\n  RDType* rhs_a    = reinterpret_cast<RDType*>(reinterpret_cast<YALE_STORAGE*>(rhs->src)->a);\n  RDType R_ZERO    = rhs_a[ rhs->src->shape[0] ];\n\n  // copy default value from the zero location in the Yale matrix\n  LDType* default_val = NM_ALLOC_N(LDType, 1);\n  *default_val        = static_cast<LDType>(R_ZERO);\n\n  LIST_STORAGE* lhs = nm_list_storage_create(l_dtype, shape, rhs->dim, default_val);\n\n  if (rhs->dim != 2)    rb_raise(nm_eStorageTypeError, \"Can only convert matrices of dim 2 from yale.\");\n\n  IType* rhs_ija  = reinterpret_cast<YALE_STORAGE*>(rhs->src)->ija;\n\n  NODE *last_row_added = NULL;\n  // Walk through rows and columns as if RHS were a dense matrix\n  for (IType i = 0; i < shape[0]; ++i) {\n    IType ri = i + rhs->offset[0];\n\n    NODE *last_added = NULL;\n\n    // Get boundaries of beginning and end of row\n    IType ija      = rhs_ija[ri],\n          ija_next = rhs_ija[ri+1];\n\n    // Are we going to need to add a diagonal for this row?\n    bool add_diag = false;\n    if (rhs_a[ri] != R_ZERO) add_diag = true; // 
non-zero and located within the bounds of the slice\n\n    if (ija < ija_next || add_diag) {\n      ija = nm::yale_storage::binary_search_left_boundary(rhs, ija, ija_next-1, rhs->offset[1]);\n\n      LIST* curr_row = list::create();\n\n      LDType* insert_val;\n\n      while (ija < ija_next) {\n        // Find first column in slice\n        IType rj = rhs_ija[ija];\n        IType j  = rj - rhs->offset[1];\n\n        // Is there a nonzero diagonal item between the previously added item and the current one?\n        if (rj > ri && add_diag) {\n          // Allocate and copy insertion value\n          insert_val  = NM_ALLOC_N(LDType, 1);\n          *insert_val = static_cast<LDType>(rhs_a[ri]);\n\n          // Insert the item in the list at the appropriate location.\n          // What is the appropriate key? Well, it's definitely right(i)==right(j), but the\n          // rj index has already been advanced past ri. So we should treat ri as the column and\n          // subtract offset[1].\n          if (last_added)   last_added = list::insert_after(last_added, ri - rhs->offset[1], insert_val);\n          else              last_added = list::insert(curr_row, false,  ri - rhs->offset[1], insert_val);\n\n          // don't add again!\n          add_diag = false;\n        }\n\n        // now allocate and add the current item\n        insert_val  = NM_ALLOC_N(LDType, 1);\n        *insert_val = static_cast<LDType>(rhs_a[ija]);\n\n        if (last_added)      last_added = list::insert_after(last_added, j, insert_val);\n        else                last_added = list::insert(curr_row, false, j, insert_val);\n\n        ++ija; // move to next entry in Yale matrix\n      }\n\n      if (add_diag) {\n\n        // still haven't added the diagonal.\n        insert_val         = NM_ALLOC_N(LDType, 1);\n        *insert_val        = static_cast<LDType>(rhs_a[ri]);\n\n        // insert the item in the list at the appropriate location\n        if (last_added)      last_added = 
list::insert_after(last_added, ri - rhs->offset[1], insert_val);\n        else                last_added = list::insert(curr_row, false, ri - rhs->offset[1], insert_val);\n\n        // no need to set add_diag to false because it'll be reset automatically in next iteration.\n      }\n\n      // Now add the list at the appropriate location\n      if (last_row_added)   last_row_added = list::insert_after(last_row_added, i, curr_row);\n      else                  last_row_added = list::insert(lhs->rows, false, i, curr_row);\n    }\n\n    // end of walk through rows\n  }\n\n  nm_yale_storage_unregister(rhs);\n\n  return lhs;\n}\n\n\n/* Copy dense into lists recursively\n *\n * FIXME: This works, but could probably be cleaner (do we really need to pass coords around?)\n */\ntemplate <typename LDType, typename RDType>\nstatic bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero, size_t& pos, size_t* coords, const size_t* shape, size_t dim, size_t recursions) {\n\n  nm_list_storage_register_list(lhs, recursions);\n\n  NODE *prev = NULL;\n  LIST *sub_list;\n  bool added = false, added_list = false;\n  //void* insert_value;\n\n  for (coords[dim-1-recursions] = 0; coords[dim-1-recursions] < shape[dim-1-recursions]; ++coords[dim-1-recursions], ++pos) {\n\n    if (recursions == 0) {\n      // create nodes\n\n      if (rhs[pos] != *zero) {\n        // is not zero\n\n        // Create a copy of our value that we will insert in the list\n        LDType* insert_value = NM_ALLOC_N(LDType, 1);\n        *insert_value        = static_cast<LDType>(rhs[pos]);\n\n        if (!lhs->first)    prev = list::insert(lhs, false, coords[dim-1-recursions], insert_value);\n        else                 prev = list::insert_after(prev, coords[dim-1-recursions], insert_value);\n\n        added = true;\n      }\n      // no need to do anything if the element is zero\n\n    } else { // create lists\n      // create a list as if there's something in the row in question, and then delete 
it if nothing turns out to be there\n      sub_list = list::create();\n\n      added_list = list_storage::cast_copy_contents_dense<LDType,RDType>(sub_list, rhs, zero, pos, coords, shape, dim, recursions-1);\n\n      if (!added_list)        list::del(sub_list, recursions-1);\n      else if (!lhs->first)    prev = list::insert(lhs, false, coords[dim-1-recursions], sub_list);\n      else                    prev = list::insert_after(prev, coords[dim-1-recursions], sub_list);\n\n      // added = (added || added_list);\n    }\n  }\n\n  nm_list_storage_unregister_list(lhs, recursions);\n\n  coords[dim-1-recursions] = 0;\n  --pos;\n\n  return added;\n}\n\n} // end of namespace list_storage\n\n\nnamespace yale_storage { // FIXME: Move to yale.cpp\n  /*\n   * Creation of yale storage from dense storage.\n   */\n  template <typename LDType, typename RDType>\n  YALE_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtype, void* init) {\n\n    if (rhs->dim != 2) rb_raise(nm_eStorageTypeError, \"can only convert matrices of dim 2 to yale\");\n\n    nm_dense_storage_register(rhs);\n\n    IType pos = 0;\n    IType ndnz = 0;\n\n    // We need a zero value. 
This should nearly always be zero, but sometimes you might want false or nil.\n    LDType    L_INIT(0);\n    if (init) {\n      if (l_dtype == RUBYOBJ) L_INIT = *reinterpret_cast<VALUE*>(init);\n      else                    L_INIT = *reinterpret_cast<LDType*>(init);\n    }\n    RDType R_INIT = static_cast<RDType>(L_INIT);\n\n    RDType* rhs_elements = reinterpret_cast<RDType*>(rhs->elements);\n\n    // First, count the non-diagonal nonzeros\n    for (size_t i = rhs->shape[0]; i-- > 0;) {\n      for (size_t j = rhs->shape[1]; j-- > 0;) {\n        pos = rhs->stride[0]*(i + rhs->offset[0]) + rhs->stride[1]*(j + rhs->offset[1]);\n        if (i != j && rhs_elements[pos] != R_INIT)  ++ndnz;\n\n        // move forward 1 position in dense matrix elements array\n      }\n    }\n\n    // Copy shape for yale construction\n    size_t* shape = NM_ALLOC_N(size_t, 2);\n    shape[0] = rhs->shape[0];\n    shape[1] = rhs->shape[1];\n\n    size_t request_capacity = shape[0] + ndnz + 1;\n\n    // Create with minimum possible capacity -- just enough to hold all of the entries\n    YALE_STORAGE* lhs = nm_yale_storage_create(l_dtype, shape, 2, request_capacity);\n\n    if (lhs->capacity < request_capacity)\n      rb_raise(nm_eStorageTypeError, \"conversion failed; capacity of %ld requested, max allowable is %ld\", (unsigned long)request_capacity, (unsigned long)(lhs->capacity));\n\n    LDType* lhs_a     = reinterpret_cast<LDType*>(lhs->a);\n    IType* lhs_ija    = lhs->ija;\n\n    // Set the zero position in the yale matrix\n    lhs_a[shape[0]]   = L_INIT;\n\n    // Start just after the zero position.\n    IType ija = shape[0]+1;\n    pos       = 0;\n\n    // Copy contents\n    for (IType i = 0; i < rhs->shape[0]; ++i) {\n      // indicate the beginning of a row in the IJA array\n      lhs_ija[i] = ija;\n\n      for (IType j = 0; j < rhs->shape[1];  ++j) {\n        pos = rhs->stride[0] * (i + rhs->offset[0]) + rhs->stride[1] * (j + rhs->offset[1]); // calc position with offsets\n\n      
  if (i == j) { // copy to diagonal\n          lhs_a[i]     = static_cast<LDType>(rhs_elements[pos]);\n        } else if (rhs_elements[pos] != R_INIT) { // copy nonzero to LU\n          lhs_ija[ija] = j; // write column index\n          lhs_a[ija]   = static_cast<LDType>(rhs_elements[pos]);\n\n          ++ija;\n        }\n      }\n    }\n\n    lhs_ija[shape[0]] = ija; // indicate the end of the last row\n    lhs->ndnz = ndnz;\n\n    nm_dense_storage_unregister(rhs);\n\n    return lhs;\n  }\n\n  /*\n   * Creation of yale storage from list storage.\n   */\n  template <typename LDType, typename RDType>\n  YALE_STORAGE* create_from_list_storage(const LIST_STORAGE* rhs, nm::dtype_t l_dtype) {\n    if (rhs->dim != 2) rb_raise(nm_eStorageTypeError, \"can only convert matrices of dim 2 to yale\");\n\n    if (rhs->dtype == RUBYOBJ) {\n      VALUE init_val = *reinterpret_cast<VALUE*>(rhs->default_val);\n      if (rb_funcall(init_val, rb_intern(\"!=\"), 1, Qnil) == Qtrue && rb_funcall(init_val, rb_intern(\"!=\"), 1, Qfalse) == Qtrue && rb_funcall(init_val, rb_intern(\"!=\"), 1, INT2FIX(0)) == Qtrue)\n        rb_raise(nm_eStorageTypeError, \"list matrix of Ruby objects must have default value equal to 0, nil, or false to convert to yale\");\n    } else if (strncmp(reinterpret_cast<const char*>(rhs->default_val), \"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\", DTYPE_SIZES[rhs->dtype]))\n      rb_raise(nm_eStorageTypeError, \"list matrix of non-Ruby objects must have default value of 0 to convert to yale\");\n\n    nm_list_storage_register(rhs);\n\n    size_t ndnz = nm_list_storage_count_nd_elements(rhs);\n    // Copy shape for yale construction\n    size_t* shape = NM_ALLOC_N(size_t, 2);\n    shape[0] = rhs->shape[0];\n    shape[1] = rhs->shape[1];\n\n    size_t request_capacity = shape[0] + ndnz + 1;\n    YALE_STORAGE* lhs = nm_yale_storage_create(l_dtype, shape, 2, request_capacity);\n\n    if (lhs->capacity < request_capacity)\n      rb_raise(nm_eStorageTypeError, 
\"conversion failed; capacity of %ld requested, max allowable is %ld\", (unsigned long)request_capacity, (unsigned long)(lhs->capacity));\n\n    // Initialize the A and IJA arrays\n    init<LDType>(lhs, rhs->default_val);\n\n    IType*  lhs_ija = lhs->ija;\n    LDType* lhs_a   = reinterpret_cast<LDType*>(lhs->a);\n\n    IType ija = lhs->shape[0]+1;\n\n    // Copy contents \n    for (NODE* i_curr = rhs->rows->first; i_curr; i_curr = i_curr->next) {\n\n      // Shrink reference\n      int i = i_curr->key - rhs->offset[0];\n      if (i < 0 || i >= (int)rhs->shape[0]) continue;\n\n      for (NODE* j_curr = ((LIST*)(i_curr->val))->first; j_curr; j_curr = j_curr->next) {\n        \n        // Shrink reference\n        int j = j_curr->key - rhs->offset[1];\n        if (j < 0 || j >= (int)rhs->shape[1]) continue;\n\n        LDType cast_jcurr_val = *reinterpret_cast<RDType*>(j_curr->val);\n        if (i_curr->key - rhs->offset[0] == j_curr->key - rhs->offset[1])\n          lhs_a[i_curr->key - rhs->offset[0]] = cast_jcurr_val; // set diagonal\n        else {\n          lhs_ija[ija] = j_curr->key - rhs->offset[1];    // set column value\n\n          lhs_a[ija]   = cast_jcurr_val;                      // set cell value\n\n          ++ija;\n          // indicate the beginning of a row in the IJA array\n          for (size_t i = i_curr->key - rhs->offset[0] + 1; i < rhs->shape[0] + rhs->offset[0]; ++i) {\n            lhs_ija[i] = ija;\n          }\n\n        }\n      }\n\n    }\n    \n    lhs_ija[rhs->shape[0]] = ija; // indicate the end of the last row\n    lhs->ndnz = ndnz;\n\n    nm_list_storage_unregister(rhs);\n\n    return lhs;\n  }\n\n} // end of namespace yale_storage\n} // end of namespace nm\n\nextern \"C\" {\n\n  /*\n   * The following functions represent stype casts -- conversions from one\n   * stype to another. 
Each of these is the C accessor for a templated C++\n   * function.\n   */\n\n\n  STORAGE* nm_yale_storage_from_dense(const STORAGE* right, nm::dtype_t l_dtype, void* init) {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::create_from_dense_storage, YALE_STORAGE*, const DENSE_STORAGE* rhs, nm::dtype_t l_dtype, void*);\n\n    if (!ttable[l_dtype][right->dtype]) {\n      rb_raise(nm_eDataTypeError, \"casting between these dtypes is undefined\");\n      return NULL;\n    }\n\n    return (STORAGE*)ttable[l_dtype][right->dtype]((const DENSE_STORAGE*)right, l_dtype, init);\n  }\n\n  STORAGE* nm_yale_storage_from_list(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::create_from_list_storage, YALE_STORAGE*, const LIST_STORAGE* rhs, nm::dtype_t l_dtype);\n\n    if (!ttable[l_dtype][right->dtype]) {\n      rb_raise(nm_eDataTypeError, \"casting between these dtypes is undefined\");\n      return NULL;\n    }\n\n    return (STORAGE*)ttable[l_dtype][right->dtype]((const LIST_STORAGE*)right, l_dtype);\n  }\n\n  STORAGE* nm_dense_storage_from_list(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::create_from_list_storage, DENSE_STORAGE*, const LIST_STORAGE* rhs, nm::dtype_t l_dtype);\n\n    if (!ttable[l_dtype][right->dtype]) {\n      rb_raise(nm_eDataTypeError, \"casting between these dtypes is undefined\");\n      return NULL;\n    }\n\n    return (STORAGE*)ttable[l_dtype][right->dtype]((const LIST_STORAGE*)right, l_dtype);\n  }\n\n  STORAGE* nm_dense_storage_from_yale(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::create_from_yale_storage, DENSE_STORAGE*, const YALE_STORAGE* rhs, nm::dtype_t l_dtype);\n\n    const YALE_STORAGE* casted_right = reinterpret_cast<const YALE_STORAGE*>(right);\n\n    if (!ttable[l_dtype][right->dtype]) {\n      
rb_raise(nm_eDataTypeError, \"casting between these dtypes is undefined\");\n      return NULL;\n    }\n\n    return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype));\n  }\n\n  STORAGE* nm_list_storage_from_dense(const STORAGE* right, nm::dtype_t l_dtype, void* init) {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::create_from_dense_storage, LIST_STORAGE*, const DENSE_STORAGE*, nm::dtype_t, void*);\n\n    if (!ttable[l_dtype][right->dtype]) {\n      rb_raise(nm_eDataTypeError, \"casting between these dtypes is undefined\");\n      return NULL;\n    }\n\n    return (STORAGE*)ttable[l_dtype][right->dtype]((DENSE_STORAGE*)right, l_dtype, init);\n  }\n\n  STORAGE* nm_list_storage_from_yale(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::create_from_yale_storage, LIST_STORAGE*, const YALE_STORAGE* rhs, nm::dtype_t l_dtype);\n\n    const YALE_STORAGE* casted_right = reinterpret_cast<const YALE_STORAGE*>(right);\n\n    if (!ttable[l_dtype][right->dtype]) {\n      rb_raise(nm_eDataTypeError, \"casting between these dtypes is undefined\");\n      return NULL;\n    }\n\n    return (STORAGE*)ttable[l_dtype][right->dtype](casted_right, l_dtype);\n  }\n\n} // end of extern \"C\"\n\n"
  },
  {
    "path": "ext/nmatrix/storage/storage.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == storage.h\n//\n// This file brings together everything in the storage directory.  It should not\n// be included by anything in the storage directory, but should be included by\n// files needing to use the storage code.\n\n#ifndef STORAGE_H\n#define STORAGE_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <cstdlib>\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n\n#include \"data/data.h\"\n\n#include \"common.h\"\n#include \"dense/dense.h\"\n#include \"list/list.h\"\n#include \"yale/yale.h\"\n\n/*\n * Macros\n */\n\n#define NMATRIX_DTYPE_IS_COMPLEX(s)    ((s->dtype == nm::COMPLEX64) or (s->dtype == nm::COMPLEX128))\n#define NMATRIX_DTYPE_IS_FLOAT(s)      ((s->dtype == nm::FLOAT32) or (s->dtype == nm::FLOAT64))\n#define NMATRIX_DTYPE_IS_INTEGER(s)    (s->dtype <= nm::INT64)\n#define NMATRIX_DTYPE_IS_RUBYOBJ(s)    (s->dtype == nm::RUBYOBJ)\n\n\n/*\n * Types\n */\n\n\n/*\n * Data\n */\n\nnamespace nm {\n  const int NUM_STYPES = 3;\n}\n\nextern \"C\" {\n\n  extern const char* const STYPE_NAMES[nm::NUM_STYPES];\n  extern void (* const STYPE_MARK[nm::NUM_STYPES])(STORAGE*);\n\n  /*\n   * Functions\n   */\n\n  /////////////////////////\n  // Copying and Casting //\n  
/////////////////////////\n\n  STORAGE*    nm_dense_storage_from_list(const STORAGE* right, nm::dtype_t l_dtype, void*);\n  STORAGE*    nm_dense_storage_from_yale(const STORAGE* right, nm::dtype_t l_dtype, void*);\n  STORAGE*    nm_list_storage_from_dense(const STORAGE* right, nm::dtype_t l_dtype, void*);\n  STORAGE*    nm_list_storage_from_yale(const STORAGE* right,  nm::dtype_t l_dtype, void*);\n  STORAGE*    nm_yale_storage_from_list(const STORAGE* right,  nm::dtype_t l_dtype, void*);\n  STORAGE*    nm_yale_storage_from_dense(const STORAGE* right, nm::dtype_t l_dtype, void*);\n\n} // end of extern \"C\" block\n\n\n#endif // STORAGE_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/class.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == class.h\n//\n// Object-oriented interface for Yale.\n//\n\n#ifndef YALE_CLASS_H\n# define YALE_CLASS_H\n\n#include \"../dense/dense.h\"\n#include \"math/transpose.h\"\n#include \"yale.h\"\n\nnamespace nm {\n\n\n/*\n * This class is basically an intermediary for YALE_STORAGE objects which enables us to treat it like a C++ object. It\n * keeps the src pointer as its s, along with other relevant slice information.\n *\n * It's useful for creating iterators and such. 
It isn't responsible for allocating or freeing its YALE_STORAGE* pointers.\n */\n\ntemplate <typename D>\nclass YaleStorage {\npublic:\n  YaleStorage(const YALE_STORAGE* storage)\n   : s(reinterpret_cast<YALE_STORAGE*>(storage->src)),\n     slice(storage != storage->src),\n     slice_shape(storage->shape),\n     slice_offset(storage->offset)\n  {\n    nm_yale_storage_register(storage->src);\n  }\n\n  YaleStorage(const STORAGE* storage)\n   : s(reinterpret_cast<YALE_STORAGE*>(storage->src)),\n     slice(storage != storage->src),\n     slice_shape(storage->shape),\n     slice_offset(storage->offset)\n  {\n    nm_yale_storage_register(reinterpret_cast<STORAGE*>(storage->src));\n  }\n\n  ~YaleStorage() {\n    nm_yale_storage_unregister(s);\n  }\n\n  /* Allows us to do YaleStorage<uint8>::dtype() to get an nm::dtype_t */\n  static nm::dtype_t dtype() {\n    return nm::ctype_to_dtype_enum<D>::value_type;\n  }\n\n\n  bool is_ref() const { return slice; }\n\n  inline D* default_obj_ptr() { return &(a(s->shape[0])); }\n  inline D& default_obj() { return a(s->shape[0]); }\n  inline const D& default_obj() const { return a(s->shape[0]); }\n  inline const D& const_default_obj() const { return a(s->shape[0]); }\n\n\n  /*\n   * Return a Ruby VALUE representation of default_obj()\n   */\n  VALUE const_default_value() const {\n    return nm::yale_storage::nm_rb_dereference(a(s->shape[0]));\n  }\n\n  inline size_t* ija_p()       const       { return reinterpret_cast<size_t*>(s->ija); }\n  inline const size_t& ija(size_t p) const { return ija_p()[p]; }\n  inline size_t& ija(size_t p)             { return ija_p()[p]; }\n  inline D* a_p()         const       { return reinterpret_cast<D*>(s->a); }\n  inline const D& a(size_t p) const   { return a_p()[p]; }\n  inline D& a(size_t p)               { return a_p()[p]; }\n\n  bool real_row_empty(size_t i) const { return ija(i+1) - ija(i) == 0 ? 
true : false; }\n\n  inline size_t* shape_p()        const { return slice_shape;      }\n  inline size_t  shape(uint8_t d) const { return slice_shape[d];   }\n  inline size_t* real_shape_p() const { return s->shape;           }\n  inline size_t  real_shape(uint8_t d) const { return s->shape[d]; }\n  inline size_t* offset_p()     const { return slice_offset;       }\n  inline size_t  offset(uint8_t d) const { return slice_offset[d]; }\n  inline size_t  capacity() const { return s->capacity;            }\n  inline size_t  size() const { return ija(real_shape(0));         }\n\n\n  /*\n   * Returns true if the value at apos is the default value.\n   * Mainly used for determining if the diagonal contains zeros.\n   */\n  bool is_pos_default_value(size_t apos) const {\n    return (a(apos) == const_default_obj());\n  }\n\n  /*\n   * Given a size-2 array of size_t, representing the shape, determine\n   * the maximum size of YaleStorage arrays.\n   */\n  static size_t max_size(const size_t* shape) {\n    size_t result = shape[0] * shape[1] + 1;\n    if (shape[0] > shape[1])\n      result += shape[0] - shape[1];\n    return result;\n  }\n\n  /*\n   * Minimum size of Yale Storage arrays given some shape.\n   */\n  static size_t min_size(const size_t* shape) {\n    return shape[0]*2 + 1;\n  }\n\n  /*\n   * This is the guaranteed maximum size of the IJA/A arrays of the matrix given its shape.\n   */\n  inline size_t real_max_size() const {\n    return YaleStorage<D>::max_size(real_shape_p());\n  }\n\n  // Binary search between left and right in IJA for column ID real_j. 
Returns left if not found.\n  size_t real_find_pos(size_t left, size_t right, size_t real_j, bool& found) const {\n    if (left > right) {\n      found = false;\n      return left;\n    }\n\n    size_t mid   = (left + right) / 2;\n    size_t mid_j = ija(mid);\n\n    if (mid_j == real_j) {\n      found = true;\n      return mid;\n    } else if (mid_j > real_j)  return real_find_pos(left, mid - 1, real_j, found);\n    else                        return real_find_pos(mid + 1, right, real_j, found);\n  }\n\n  // Binary search between left and right in IJA for column ID real_j. Essentially finds where the slice should begin,\n  // with no guarantee that there's anything in there.\n  size_t real_find_left_boundary_pos(size_t left, size_t right, size_t real_j) const {\n    if (left > right) return right;\n    if (ija(left) >= real_j) return left;\n\n    size_t mid   = (left + right) / 2;\n    size_t mid_j = ija(mid);\n\n    if (mid_j == real_j)      return mid;\n    else if (mid_j > real_j)  return real_find_left_boundary_pos(left, mid, real_j);\n    else                      return real_find_left_boundary_pos(mid + 1, right, real_j);\n  }\n\n  // Binary search between left and right in IJA for column ID real_j. Essentially finds where the slice should begin,\n  // with no guarantee that there's anything in there.\n  size_t real_find_right_boundary_pos(size_t left, size_t right, size_t real_j) const {\n    if (left > right) return right;\n    if (ija(right) <= real_j) return right;\n\n    size_t mid   = (left + right) / 2;\n    size_t mid_j = ija(mid);\n\n    if (mid_j == real_j)      return mid;\n    else if (mid_j > real_j)  return real_find_right_boundary_pos(left, mid, real_j);\n    else                      return real_find_right_boundary_pos(mid + 1, right, real_j);\n  }\n\n\n  // Binary search for coordinates i,j in the slice. 
If not found, return -1.\n  std::pair<size_t,bool> find_pos(const std::pair<size_t,size_t>& ij) const {\n    size_t left   = ija(ij.first + offset(0));\n    size_t right  = ija(ij.first + offset(0) + 1) - 1;\n\n    std::pair<size_t, bool> result;\n    result.first = real_find_pos(left, right, ij.second + offset(1), result.second);\n    return result;\n  }\n\n  // Binary search for coordinates i,j in the slice, and return the first position >= j in row i.\n  size_t find_pos_for_insertion(size_t i, size_t j) const {\n    size_t left   = ija(i + offset(0));\n    size_t right  = ija(i + offset(0) + 1) - 1;\n\n    // Check that the right search point is valid. rflbp will check to make sure the left is valid relative to left.\n    if (right > ija(real_shape(0))) {\n      right = ija(real_shape(0))-1;\n    }\n    size_t result = real_find_left_boundary_pos(left, right, j + offset(1));\n    return result;\n  }\n\n  typedef yale_storage::basic_iterator_T<D,D,YaleStorage<D> >              basic_iterator;\n  typedef yale_storage::basic_iterator_T<D,const D,const YaleStorage<D> >  const_basic_iterator;\n\n  typedef yale_storage::stored_diagonal_iterator_T<D,D,YaleStorage<D> >              stored_diagonal_iterator;\n  typedef yale_storage::stored_diagonal_iterator_T<D,const D,const YaleStorage<D> >  const_stored_diagonal_iterator;\n\n  typedef yale_storage::iterator_T<D,D,YaleStorage<D> >                iterator;\n  typedef yale_storage::iterator_T<D,const D,const YaleStorage<D> >    const_iterator;\n\n\n  friend class yale_storage::row_iterator_T<D,D,YaleStorage<D> >;\n  typedef yale_storage::row_iterator_T<D,D,YaleStorage<D> >             row_iterator;\n  typedef yale_storage::row_iterator_T<D,const D,const YaleStorage<D> > const_row_iterator;\n\n  typedef yale_storage::row_stored_iterator_T<D,D,YaleStorage<D>,row_iterator>    row_stored_iterator;\n  typedef yale_storage::row_stored_nd_iterator_T<D,D,YaleStorage<D>,row_iterator> row_stored_nd_iterator;\n  typedef 
yale_storage::row_stored_iterator_T<D,const D,const YaleStorage<D>,const_row_iterator>       const_row_stored_iterator;\n  typedef yale_storage::row_stored_nd_iterator_T<D,const D,const YaleStorage<D>,const_row_iterator>    const_row_stored_nd_iterator;\n  typedef std::pair<row_iterator,row_stored_nd_iterator>                                               row_nd_iter_pair;\n\n  // Variety of iterator begin and end functions.\n  iterator begin(size_t row = 0)                      {      return iterator(*this, row);                 }\n  iterator row_end(size_t row)                        {      return begin(row+1);                         }\n  iterator end()                                      {      return iterator(*this, shape(0));            }\n  const_iterator cbegin(size_t row = 0) const         {      return const_iterator(*this, row);           }\n  const_iterator crow_end(size_t row) const           {      return cbegin(row+1);                        }\n  const_iterator cend() const                         {      return const_iterator(*this, shape(0));      }\n\n  stored_diagonal_iterator sdbegin(size_t d = 0)      {      return stored_diagonal_iterator(*this, d);   }\n  stored_diagonal_iterator sdend()                    {\n    return stored_diagonal_iterator(*this, std::min( shape(0) + offset(0), shape(1) + offset(1) ) - std::max(offset(0), offset(1)) );\n  }\n  const_stored_diagonal_iterator csdbegin(size_t d = 0) const { return const_stored_diagonal_iterator(*this, d); }\n  const_stored_diagonal_iterator csdend() const        {\n    return const_stored_diagonal_iterator(*this, std::min( shape(0) + offset(0), shape(1) + offset(1) ) - std::max(offset(0), offset(1)) );\n  }\n  row_iterator ribegin(size_t row = 0)                {      return row_iterator(*this, row);             }\n  row_iterator riend()                                {      return row_iterator(*this, shape(0));        }\n  const_row_iterator cribegin(size_t row = 0) const   {      return 
const_row_iterator(*this, row);       }\n  const_row_iterator criend() const                   {      return const_row_iterator(*this, shape(0));  }\n\n\n  /*\n   * Get a count of the ndnz in the slice as if it were its own matrix.\n   */\n  size_t count_copy_ndnz() const {\n    if (!slice) return s->ndnz; // easy way -- not a slice.\n    size_t count = 0;\n\n    // Visit all stored entries.\n    for (const_row_iterator it = cribegin(); it != criend(); ++it){\n      for (auto jt = it.begin(); jt != it.end(); ++jt) {\n        if (it.i() != jt.j() && *jt != const_default_obj()) ++count;\n      }\n    }\n\n    return count;\n  }\n\n  /*\n   * Returns the iterator for i,j or snd_end() if not found.\n   */\n/*  stored_nondiagonal_iterator find(const std::pair<size_t,size_t>& ij) {\n    std::pair<size_t,bool> find_pos_result = find_pos(ij);\n    if (!find_pos_result.second) return sndend();\n    else return stored_nondiagonal_iterator(*this, ij.first, find_pos_result.first);\n  } */\n\n  /*\n   * Returns a stored_nondiagonal_iterator pointing to the location where some coords i,j should go, or returns their\n   * location if present.\n   */\n  /*std::pair<row_iterator, row_stored_nd_iterator> lower_bound(const std::pair<size_t,size_t>& ij)  {\n    row_iterator it            = ribegin(ij.first);\n    row_stored_nd_iterator jt  = it.lower_bound(ij.second);\n    return std::make_pair(it,jt);\n  } */\n\n  class multi_row_insertion_plan {\n  public:\n    std::vector<size_t>   pos;\n    std::vector<int>      change;\n    int                   total_change; // the net change occurring\n    size_t                num_changes;  // the total number of rows that need to change size\n    multi_row_insertion_plan(size_t rows_in_slice) : pos(rows_in_slice), change(rows_in_slice), total_change(0), num_changes(0) { }\n\n    void add(size_t i, const std::pair<int,size_t>& change_and_pos) {\n      pos[i]        = change_and_pos.second;\n      change[i]     = change_and_pos.first;\n      
total_change += change_and_pos.first;\n      if (change_and_pos.first != 0) num_changes++;\n    }\n  };\n\n\n  /*\n   * Find all the information we need in order to modify multiple rows.\n   */\n  multi_row_insertion_plan insertion_plan(row_iterator i, size_t j, size_t* lengths, D* const v, size_t v_size) const {\n    multi_row_insertion_plan p(lengths[0]);\n\n    // v_offset is our offset in the array v. If the user wants to change two elements in each of three rows,\n    // but passes an array of size 3, we need to know that the second insertion plan must start at position\n    // 2 instead of 0; and then the third must start at 1.\n    size_t v_offset = 0;\n    for (size_t m = 0; m < lengths[0]; ++m, ++i) {\n      p.add(m, i.single_row_insertion_plan(j, lengths[1], v, v_size, v_offset));\n    }\n\n    return p;\n  }\n\n\n\n  /*\n   * Insert entries in multiple rows. Slice-setting.\n   */\n  void insert(row_iterator i, size_t j, size_t* lengths, D* const v, size_t v_size) {\n    // Expensive pre-processing step: find all the information we need in order to do insertions.\n    multi_row_insertion_plan p = insertion_plan(i, j, lengths, v, v_size);\n\n    // There are more efficient ways to do this, but this is the low hanging fruit version of the algorithm.\n    // Here's the full problem: http://stackoverflow.com/questions/18753375/algorithm-for-merging-short-lists-into-a-long-vector\n    // --JW\n\n    bool resize = false;\n    size_t sz = size();\n    if (p.num_changes > 1) resize = true; // TODO: There are surely better ways to do this, but I've gone for the low-hanging fruit\n    else if (sz + p.total_change > capacity() || sz + p.total_change <= capacity() / nm::yale_storage::GROWTH_CONSTANT) resize = true;\n\n    if (resize) {\n      update_resize_move_insert(i.i() + offset(0), j + offset(1), lengths, v, v_size, p);\n    } else {\n\n      // Make the necessary modifications, which hopefully can be done in-place.\n      size_t v_offset = 0;\n      //int accum 
      = 0;\n      for (size_t ii = 0; ii < lengths[0]; ++ii, ++i) {\n        i.insert(row_stored_nd_iterator(i, p.pos[ii]), j, lengths[1], v, v_size, v_offset);\n      }\n    }\n  }\n\n\n  /*\n   * Most Ruby-centric insert function. Accepts coordinate information in slice,\n   * and value information of various types in +right+. This function must evaluate\n   * +right+ and determine what other functions to call in order to properly handle\n   * it.\n   */\n  void insert(SLICE* slice, VALUE right) {\n\n    NM_CONSERVATIVE(nm_register_value(&right));\n\n    std::pair<NMATRIX*,bool> nm_and_free =\n      interpret_arg_as_dense_nmatrix(right, dtype());\n    // Map the data onto D* v\n\n    D*     v;\n    size_t v_size = 1;\n\n    if (nm_and_free.first) {\n      DENSE_STORAGE* s = reinterpret_cast<DENSE_STORAGE*>(nm_and_free.first->storage);\n      v       = reinterpret_cast<D*>(s->elements);\n      v_size  = nm_storage_count_max_elements(s);\n\n    } else if (RB_TYPE_P(right, T_ARRAY)) {\n      v_size = RARRAY_LEN(right);\n      v      = NM_ALLOC_N(D, v_size);\n      if (dtype() == nm::RUBYOBJ) {\n       nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n      }\n      for (size_t m = 0; m < v_size; ++m) {\n        rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));\n      }\n      if (dtype() == nm::RUBYOBJ) {\n       nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);\n      }\n\n    } else {\n      v = reinterpret_cast<D*>(rubyobj_to_cval(right, dtype()));\n    }\n\n    row_iterator i = ribegin(slice->coords[0]);\n\n    if (slice->single || (slice->lengths[0] == 1 && slice->lengths[1] == 1)) { // single entry\n      i.insert(slice->coords[1], *v);\n    } else if (slice->lengths[0] == 1) { // single row, multiple entries\n      i.insert(slice->coords[1], slice->lengths[1], v, v_size);\n    } else { // multiple rows, unknown number of entries\n      insert(i, slice->coords[1], slice->lengths, v, v_size);\n    }\n\n    // Only free v if it was 
allocated in this function.\n    if (nm_and_free.first) {\n      if (nm_and_free.second) {\n        nm_delete(nm_and_free.first);\n      }\n    } else NM_FREE(v);\n\n    NM_CONSERVATIVE(nm_unregister_value(&right));\n  }\n\n\n  /*\n   * Remove an entry from an already found non-diagonal position.\n   */\n  row_iterator erase(row_iterator it, const row_stored_nd_iterator& position) {\n    it.erase(position);\n    return it;\n  }\n\n\n  /*\n   * Remove an entry from the matrix at the already-located position. If diagonal, just sets to default; otherwise,\n   * actually removes the entry.\n   */\n  row_iterator erase(row_iterator it, const row_stored_iterator& jt) {\n    it.erase((const row_stored_nd_iterator&)jt);\n    return it;\n  }\n\n\n  row_iterator insert(row_iterator it, row_stored_iterator position, size_t j, const D& val) {\n    it.insert(position, j, val);\n    return it;\n  }\n\n\n  /*\n   * Insert an element in column j, using position's p() as the location to insert the new column. i and j will be the\n   * coordinates. This also does a replace if column j is already present.\n   *\n   * Returns true if a new entry was added and false if an entry was replaced.\n   *\n   * Pre-conditions:\n   *   - position.p() must be between ija(real_i) and ija(real_i+1), inclusive, where real_i = i + offset(0)\n   *   - real_i and real_j must not be equal\n   */\n  row_iterator insert(row_iterator it, row_stored_nd_iterator position, size_t j, const D& val) {\n    it.insert(position, j, val);\n    return it;\n  }\n\n\n  /*\n   * Insert n elements v in columns j, using position as a guide. i gives the starting row. 
If at any time a value in j\n   * decreases,\n   */\n  /*bool insert(stored_iterator position, size_t n, size_t i, size_t* j, DType* v) {\n\n  } */\n\n  /*\n   * A pseudo-insert operation, since the diagonal portion of the A array is constant size.\n   */\n  stored_diagonal_iterator insert(stored_diagonal_iterator position, const D& val) {\n    *position = val;\n    return position;\n  }\n\n\n/*  iterator insert(iterator position, size_t j, const D& val) {\n    if (position.real_i() == position.real_j()) {\n      s->a(position.real_i()) = val;\n      return position;\n    } else {\n      row_iterator it = ribegin(position.i());\n      row_stored_nd_iterator position = it.ndbegin(j);\n      return insert(it, position, j, val);\n    }\n  }*/\n\n\n\n\n  /*\n   * Returns a pointer to the location of some entry in the matrix.\n   *\n   * This is needed for backwards compatibility. We don't really want anyone\n   * to modify the contents of that pointer, because it might be the ZERO location.\n   *\n   * TODO: Change all storage_get functions to return a VALUE once we've put list and\n   * dense in OO mode. ???\n   */\n  inline D* get_single_p(SLICE* slice) {\n    size_t real_i = offset(0) + slice->coords[0],\n           real_j = offset(1) + slice->coords[1];\n\n    if (real_i == real_j)\n      return &(a(real_i));\n\n    if (ija(real_i) == ija(real_i+1))\n      return default_obj_ptr(); // zero pointer\n\n    // binary search for a column's location\n    std::pair<size_t,bool> p = find_pos(std::make_pair(slice->coords[0], slice->coords[1]));\n    if (p.second)\n      return &(a(p.first));\n                       // not found: return default\n    return default_obj_ptr(); // zero pointer\n  }\n\n\n  /*\n   * Allocate a reference pointing to s. Note that even if +this+ is a reference,\n   * we can create a reference within it.\n   *\n   * Note: Make sure you NM_FREE() the result of this call. 
You can't just cast it\n   * directly into a YaleStorage<D> class.\n   */\n  YALE_STORAGE* alloc_ref(SLICE* slice) {\n    YALE_STORAGE* ns  = NM_ALLOC( YALE_STORAGE );\n\n    ns->dim           = s->dim;\n    ns->offset        = NM_ALLOC_N(size_t, ns->dim);\n    ns->shape         = NM_ALLOC_N(size_t, ns->dim);\n\n    for (size_t d = 0; d < ns->dim; ++d) {\n      ns->offset[d]   = slice->coords[d]  + offset(d);\n      ns->shape[d]    = slice->lengths[d];\n    }\n\n    ns->dtype         = s->dtype;\n    ns->a             = a_p();\n    ns->ija           = ija_p();\n\n    ns->src           = s;\n    s->count++;\n\n    ns->ndnz          = 0;\n    ns->capacity      = 0;\n\n    return ns;\n  }\n\n\n  /*\n   * Allocates and initializes the basic struct (but not IJA or A vectors).\n   */\n  static YALE_STORAGE* alloc(size_t* shape, size_t dim = 2) {\n    YALE_STORAGE* s = NM_ALLOC( YALE_STORAGE );\n\n    s->ndnz         = 0;\n    s->dtype        = dtype();\n    s->shape        = shape;\n    s->offset       = NM_ALLOC_N(size_t, dim);\n    for (size_t d = 0; d < dim; ++d)\n      s->offset[d]  = 0;\n    s->dim          = dim;\n    s->src          = reinterpret_cast<STORAGE*>(s);\n    s->count        = 1;\n\n    return s;\n  }\n\n\n  /*\n   * Create basic storage of same dtype as YaleStorage<D>. 
Allocates it,\n   * reserves necessary space, but doesn't fill structure at all.\n   */\n  static YALE_STORAGE* create(size_t* shape, size_t reserve) {\n\n    YALE_STORAGE* s = alloc( shape, 2 );\n    size_t max_sz   = YaleStorage<D>::max_size(shape),\n           min_sz   = YaleStorage<D>::min_size(shape);\n\n    if (reserve < min_sz) {\n      s->capacity = min_sz;\n    } else if (reserve > max_sz) {\n      s->capacity = max_sz;\n    } else {\n      s->capacity = reserve;\n    }\n\n    s->ija = NM_ALLOC_N( size_t, s->capacity );\n    s->a   = NM_ALLOC_N( D,      s->capacity );\n\n    return s;\n  }\n\n\n  /*\n   * Clear out the D portion of the A vector (clearing the diagonal and setting\n   * the zero value).\n   */\n  static void clear_diagonal_and_zero(YALE_STORAGE& s, D* init_val = NULL) {\n    D* a  = reinterpret_cast<D*>(s.a);\n\n    // Clear out the diagonal + one extra entry\n    if (init_val) {\n      for (size_t i = 0; i <= s.shape[0]; ++i)\n        a[i] = *init_val;\n    } else {\n      for (size_t i = 0; i <= s.shape[0]; ++i)\n        a[i] = 0;\n    }\n  }\n\n\n  /*\n   * Empty the matrix by initializing the IJA vector and setting the diagonal to 0.\n   *\n   * Called when most YALE_STORAGE objects are created.\n   *\n   * Can't go inside of class YaleStorage because YaleStorage creation requires that\n   * IJA already be initialized.\n   */\n  static void init(YALE_STORAGE& s, D* init_val) {\n    size_t IA_INIT = s.shape[0] + 1;\n    for (size_t m = 0; m < IA_INIT; ++m) {\n      s.ija[m] = IA_INIT;\n    }\n\n    clear_diagonal_and_zero(s, init_val);\n  }\n\n\n  /*\n   * Make a very basic allocation. No structure or copy or anything. It'll be shaped like this\n   * matrix.\n   *\n   * TODO: Combine this with ::create()'s ::alloc(). 
These are redundant.\n   */\n   template <typename E>\n   YALE_STORAGE* alloc_basic_copy(size_t new_capacity, size_t new_ndnz) const {\n     nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;\n     YALE_STORAGE* lhs     = NM_ALLOC( YALE_STORAGE );\n     lhs->dim              = s->dim;\n     lhs->shape            = NM_ALLOC_N( size_t, lhs->dim );\n\n     lhs->shape[0]         = shape(0);\n     lhs->shape[1]         = shape(1);\n\n     lhs->offset           = NM_ALLOC_N( size_t, lhs->dim );\n\n     lhs->offset[0]        = 0;\n     lhs->offset[1]        = 0;\n\n     lhs->capacity         = new_capacity;\n     lhs->dtype            = new_dtype;\n     lhs->ndnz             = new_ndnz;\n     lhs->ija              = NM_ALLOC_N( size_t, new_capacity );\n     lhs->a                = NM_ALLOC_N( E,      new_capacity );\n     lhs->src              = lhs;\n     lhs->count            = 1;\n\n     return lhs;\n   }\n\n\n  /*\n   * Make a full matrix structure copy (entries remain uninitialized). 
Remember to NM_FREE()!\n   */\n  template <typename E>\n  YALE_STORAGE* alloc_struct_copy(size_t new_capacity) const {\n    YALE_STORAGE* lhs     = alloc_basic_copy<E>(new_capacity, count_copy_ndnz());\n    // Now copy the IJA contents\n    if (slice) {\n      rb_raise(rb_eNotImpError, \"cannot copy struct due to different offsets\");\n    } else {\n      for (size_t m = 0; m < size(); ++m) {\n        lhs->ija[m] = ija(m); // copy indices\n      }\n    }\n    return lhs;\n  }\n\n\n  /*\n   * Copy this slice (or the full matrix if it isn't a slice) into a new matrix which is already allocated, ns.\n   */\n  template <typename E, bool Yield=false>\n  void copy(YALE_STORAGE& ns) const {\n    //nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;\n    // get the default value for initialization (we'll re-use val for other copies after this)\n    E val = static_cast<E>(const_default_obj());\n\n    // initialize the matrix structure and clear the diagonal so we don't have to\n    // keep track of unwritten entries.\n    YaleStorage<E>::init(ns, &val);\n\n    E* ns_a    = reinterpret_cast<E*>(ns.a);\n    size_t sz  = shape(0) + 1; // current used size of ns\n    nm_yale_storage_register(&ns);\n\n    // FIXME: If diagonals line up, it's probably faster to do this with stored diagonal and stored non-diagonal iterators\n    for (const_row_iterator it = cribegin(); it != criend(); ++it) {\n      for (auto jt = it.begin(); !jt.end(); ++jt) {\n        if (it.i() == jt.j()) {\n          if (Yield)  ns_a[it.i()] = rb_yield(~jt);\n          else        ns_a[it.i()] = static_cast<E>(*jt);\n        } else if (*jt != const_default_obj()) {\n          if (Yield)  ns_a[sz]     = rb_yield(~jt);\n          else        ns_a[sz]     = static_cast<E>(*jt);\n          ns.ija[sz]    = jt.j();\n          ++sz;\n        }\n      }\n      ns.ija[it.i()+1]  = sz;\n    }\n    nm_yale_storage_unregister(&ns);\n\n    //ns.ija[shape(0)] = sz;                // indicate end of last row\n    
ns.ndnz          = sz - shape(0) - 1; // update ndnz count\n  }\n\n\n  /*\n   * Allocate a casted copy of this matrix/reference. Remember to NM_FREE() the result!\n   *\n   * If Yield is true, E must be nm::RubyObject, and it will call an rb_yield upon the stored value.\n   */\n  template <typename E, bool Yield = false>\n  YALE_STORAGE* alloc_copy() const {\n    //nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;\n\n    YALE_STORAGE* lhs;\n    if (slice) {\n      size_t* xshape    = NM_ALLOC_N(size_t, 2);\n      xshape[0]         = shape(0);\n      xshape[1]         = shape(1);\n      size_t ndnz       = count_copy_ndnz();\n      size_t reserve    = shape(0) + ndnz + 1;\n\n//      std::cerr << \"reserve = \" << reserve << std::endl;\n\n      lhs               = YaleStorage<E>::create(xshape, reserve);\n\n      // FIXME: This should probably be a throw which gets caught outside of the object.\n      if (lhs->capacity < reserve)\n        rb_raise(nm_eStorageTypeError, \"conversion failed; capacity of %lu requested, max allowable is %lu\", reserve, lhs->capacity);\n\n      // Fill lhs with what's in our current matrix.\n      copy<E, Yield>(*lhs);\n    } else {\n      // Copy the structure and setup the IJA structure.\n      lhs               = alloc_struct_copy<E>(s->capacity);\n\n      E* la = reinterpret_cast<E*>(lhs->a);\n\n      nm_yale_storage_register(lhs);\n      for (size_t m = 0; m < size(); ++m) {\n        if (Yield) {\n    la[m] = rb_yield(nm::yale_storage::nm_rb_dereference(a(m)));\n  }\n        else       la[m] = static_cast<E>(a(m));\n      }\n      nm_yale_storage_unregister(lhs);\n\n    }\n\n    return lhs;\n  }\n\n  /*\n   * Allocate a transposed copy of the matrix\n   */\n  /*\n   * Allocate a casted copy of this matrix/reference. 
Remember to NM_FREE() the result!\n   *\n   * If Yield is true, E must be nm::RubyObject, and it will call an rb_yield upon the stored value.\n   */\n  template <typename E, bool Yield = false>\n  YALE_STORAGE* alloc_copy_transposed() const {\n\n    if (slice) {\n      rb_raise(rb_eNotImpError, \"please make a copy before transposing\");\n    } else {\n      // Copy the structure and setup the IJA structure.\n      size_t* xshape    = NM_ALLOC_N(size_t, 2);\n      xshape[0]         = shape(1);\n      xshape[1]         = shape(0);\n\n      // Take a stab at the number of non-diagonal stored entries we'll have.\n      size_t reserve    = size() - xshape[1] + xshape[0];\n      YALE_STORAGE* lhs = YaleStorage<E>::create(xshape, reserve);\n      E r_init          = static_cast<E>(const_default_obj());\n      YaleStorage<E>::init(*lhs, &r_init);\n\n      nm::yale_storage::transpose_yale<D,E,true,true>(shape(0), shape(1), ija_p(), ija_p(), a_p(), const_default_obj(),\n                                                      lhs->ija, lhs->ija, reinterpret_cast<E*>(lhs->a), r_init);\n      return lhs;\n    }\n\n    return NULL;\n  }\n\n\n  /*\n   * Comparison between two matrices. 
Does not check size and such -- assumption is that they are the same shape.\n   */\n  template <typename E>\n  bool operator==(const YaleStorage<E>& rhs) const {\n    for (size_t i = 0; i < shape(0); ++i) {\n      typename YaleStorage<D>::const_row_iterator li = cribegin(i);\n      typename YaleStorage<E>::const_row_iterator ri = rhs.cribegin(i);\n\n      size_t j = 0; // keep track of j so we can compare different defaults\n\n      auto lj = li.begin();\n      auto rj = ri.begin();\n      while (!lj.end() || !rj.end()) {\n        if (lj < rj) {\n          if (*lj != rhs.const_default_obj()) return false;\n          ++lj;\n        } else if (rj < lj) {\n          if (const_default_obj() != *rj)     return false;\n          ++rj;\n        } else { // rj == lj\n          if (*lj != *rj) return false;\n          ++lj;\n          ++rj;\n        }\n        ++j;\n      }\n\n      // if we skip an entry (because it's an ndnz in BOTH matrices), we need to compare defaults.\n      // (We know we skipped if lj and rj hit end before j does.)\n      if (j < shape(1) && const_default_obj() != rhs.const_default_obj()) return false;\n\n      ++li;\n      ++ri;\n    }\n\n    return true;\n  }\n\n  /*\n   * Necessary for element-wise operations. 
The return dtype will be nm::RUBYOBJ.\n   */\n  template <typename E>\n  VALUE map_merged_stored(VALUE klass, nm::YaleStorage<E>& t, VALUE r_init) const {\n    nm_register_value(&r_init);\n    VALUE s_init    = const_default_value(),\n          t_init    = t.const_default_value();\n    nm_register_value(&s_init);\n    nm_register_value(&t_init);\n    \n    // Make a reasonable approximation of the resulting capacity\n    size_t s_ndnz   = count_copy_ndnz(),\n           t_ndnz   = t.count_copy_ndnz();\n    size_t reserve  = shape(0) + std::max(s_ndnz, t_ndnz) + 1;\n\n    size_t* xshape  = NM_ALLOC_N(size_t, 2);\n    xshape[0]       = shape(0);\n    xshape[1]       = shape(1);\n\n    YALE_STORAGE* rs= YaleStorage<nm::RubyObject>::create(xshape, reserve);\n\n    if (r_init == Qnil) {\n      nm_unregister_value(&r_init);\n      r_init       = rb_yield_values(2, s_init, t_init);\n      nm_register_value(&r_init);\n    }\n\n    nm::RubyObject r_init_obj(r_init);\n\n    // Prepare the matrix structure\n    YaleStorage<nm::RubyObject>::init(*rs, &r_init_obj);\n    NMATRIX* m     = nm_create(nm::YALE_STORE, reinterpret_cast<STORAGE*>(rs));\n    nm_register_nmatrix(m);\n    VALUE result   = Data_Wrap_Struct(klass, nm_mark, nm_delete, m);\n    nm_unregister_nmatrix(m);\n    nm_register_value(&result);\n    nm_unregister_value(&r_init);\n\n    RETURN_SIZED_ENUMERATOR_PRE\n    nm_unregister_value(&result);\n    nm_unregister_value(&t_init);\n    nm_unregister_value(&s_init);\n    // No obvious, efficient way to pass a length function as the fourth argument here:\n    RETURN_SIZED_ENUMERATOR(result, 0, 0, 0);\n\n    // Create an object for us to iterate over.\n    YaleStorage<nm::RubyObject> r(rs);\n\n    // Walk down our new matrix, inserting values as we go.\n    for (size_t i = 0; i < xshape[0]; ++i) {\n      YaleStorage<nm::RubyObject>::row_iterator   ri = r.ribegin(i);\n      typename YaleStorage<D>::const_row_iterator si = cribegin(i);\n      typename 
YaleStorage<E>::const_row_iterator ti = t.cribegin(i);\n\n      auto sj = si.begin();\n      auto tj = ti.begin();\n      auto rj = ri.ndbegin();\n\n      while (sj != si.end() || tj != ti.end()) {\n        VALUE  v;\n        size_t j;\n\n        if (sj < tj) {\n          v = rb_yield_values(2, ~sj, t_init);\n          j = sj.j();\n          ++sj;\n        } else if (tj < sj) {\n          v = rb_yield_values(2, s_init, ~tj);\n          j = tj.j();\n          ++tj;\n        } else {\n          v = rb_yield_values(2, ~sj, ~tj);\n          j = sj.j();\n          ++sj;\n          ++tj;\n        }\n\n        // FIXME: This can be sped up by inserting all at the same time\n        // since it's a new matrix. But that function isn't quite ready\n        // yet.\n        if (j == i) r.a(i) = v;\n        else        rj     = ri.insert(rj, j, v);\n        //RB_P(rb_funcall(result, rb_intern(\"yale_ija\"), 0));\n      }\n    }\n    nm_unregister_value(&result);\n    nm_unregister_value(&t_init);\n    nm_unregister_value(&s_init);\n\n    return result;\n  }\n\nprotected:\n  /*\n   * Update row sizes starting with row i\n   */\n  void update_real_row_sizes_from(size_t real_i, int change) {\n    ++real_i;\n    for (; real_i <= real_shape(0); ++real_i) {\n      ija(real_i) += change;\n    }\n  }\n\n\n  /*\n   * Like move_right, but also involving a resize. This updates row sizes as well. This version also takes a plan for\n   * multiple rows, and tries to do them all in one copy. 
It's used for multi-row slice-setting.\n   *\n   * This also differs from update_resize_move in that it resizes to the exact requested size instead of reserving space.\n   */\n  void update_resize_move_insert(size_t real_i, size_t real_j, size_t* lengths, D* const v, size_t v_size, multi_row_insertion_plan p) {\n    size_t sz      = size(); // current size of the storage vectors\n    size_t new_cap = sz + p.total_change;\n\n    if (new_cap > real_max_size()) {\n      NM_FREE(v);\n      rb_raise(rb_eStandardError, \"resize caused by insertion of size %d (on top of current size %lu) would have caused yale matrix size to exceed its maximum (%lu)\", p.total_change, sz, real_max_size());\n    }\n\n    if (s->dtype == nm::RUBYOBJ) {\n      nm_register_values(reinterpret_cast<VALUE*>(v), v_size);\n    }\n\n    size_t* new_ija     = NM_ALLOC_N( size_t,new_cap );\n    D* new_a            = NM_ALLOC_N( D,     new_cap );\n\n    // Copy unchanged row pointers first.\n    size_t m = 0;\n    for (; m <= real_i; ++m) {\n      new_ija[m]        = ija(m);\n      new_a[m]          = a(m);\n    }\n\n    // Now copy unchanged locations in IJA and A.\n    size_t q = real_shape(0)+1; // q is the copy-to position.\n    size_t r = real_shape(0)+1; // r is the copy-from position.\n    for (; r < p.pos[0]; ++r, ++q) {\n      new_ija[q]        = ija(r);\n      new_a[q]          = a(r);\n    }\n\n    // For each pos and change in the slice, copy the information prior to the insertion point. 
Then insert the necessary\n    // information.\n    size_t v_offset = 0;\n    int accum = 0; // keep track of the total change as we go so we can update row information.\n    for (size_t i = 0; i < lengths[0]; ++i, ++m) {\n      for (; r < p.pos[i]; ++r, ++q) {\n        new_ija[q]      = ija(r);\n        new_a[q]        = a(r);\n      }\n\n      // Insert slice data for a single row.\n      for (size_t j = 0; j < lengths[1]; ++j, ++v_offset) {\n        if (v_offset >= v_size) v_offset %= v_size;\n\n        if (j + real_j == i + real_i) { // modify diagonal\n          new_a[real_i + i] = v[v_offset];\n        } else if (v[v_offset] != const_default_obj()) {\n          new_ija[q]        = j + real_j;\n          new_a[q]          = v[v_offset];\n          ++q; // move on to next q location\n        }\n\n        if (r < ija(real_shape(0)) && ija(r) == j + real_j) ++r; // move r forward if the column matches.\n      }\n\n      // Update the row pointer for the current row.\n      accum                += p.change[i];\n      new_ija[m]            = ija(m) + accum;\n      new_a[m]              = a(m); // copy diagonal for this row\n    }\n\n    // Now copy everything subsequent to the last insertion point.\n    for (; r < size(); ++r, ++q) {\n      new_ija[q]            = ija(r);\n      new_a[q]              = a(r);\n    }\n\n    // Update the remaining row pointers and copy remaining diagonals\n    for (; m <= real_shape(0); ++m) {\n      new_ija[m]            = ija(m) + accum;\n      new_a[m]              = a(m);\n    }\n\n    s->capacity = new_cap;\n\n    NM_FREE(s->ija);\n    NM_FREE(s->a);\n\n    if (s->dtype == nm::RUBYOBJ) {\n      nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);\n    }   \n\n    s->ija      = new_ija;\n    s->a        = reinterpret_cast<void*>(new_a);\n  }\n\n\n\n\n  /*\n   * Like move_right, but also involving a resize. 
This updates row sizes as well.\n   */\n  void update_resize_move(row_stored_nd_iterator position, size_t real_i, int n) {\n    size_t sz      = size(); // current size of the storage vectors\n    size_t new_cap = n > 0 ? capacity() * nm::yale_storage::GROWTH_CONSTANT\n                           : capacity() / nm::yale_storage::GROWTH_CONSTANT;\n    size_t max_cap = real_max_size();\n\n    if (new_cap > max_cap) {\n      new_cap = max_cap;\n      if (sz + n > max_cap)\n        rb_raise(rb_eStandardError, \"resize caused by insertion/deletion of size %d (on top of current size %lu) would have caused yale matrix size to exceed its maximum (%lu)\", n, sz, real_max_size());\n    }\n\n    if (new_cap < sz + n) new_cap = sz + n;\n\n    size_t* new_ija     = NM_ALLOC_N( size_t,new_cap );\n    D* new_a            = NM_ALLOC_N( D,     new_cap );\n\n    // Copy unchanged row pointers first.\n    for (size_t m = 0; m <= real_i; ++m) {\n      new_ija[m]        = ija(m);\n      new_a[m]          = a(m);\n    }\n\n    // Now update row pointers following the changed row as we copy the additional values.\n    for (size_t m = real_i + 1; m <= real_shape(0); ++m) {\n      new_ija[m]        = ija(m) + n;\n      new_a[m]          = a(m);\n    }\n\n    // Copy all remaining prior to insertion/removal site\n    for (size_t m = real_shape(0) + 1; m < position.p(); ++m) {\n      new_ija[m]        = ija(m);\n      new_a[m]          = a(m);\n    }\n\n    // Copy all subsequent to insertion/removal site\n    size_t m = position.p();\n    if (n < 0) m -= n;\n\n    for (; m < sz; ++m) {\n      new_ija[m+n]      = ija(m);\n      new_a[m+n]        = a(m);\n    }\n\n    if (s->dtype == nm::RUBYOBJ) {\n      nm_yale_storage_register_a(new_a, new_cap);\n    }\n\n    s->capacity = new_cap;\n\n    NM_FREE(s->ija);\n    NM_FREE(s->a);\n\n    if (s->dtype == nm::RUBYOBJ) {\n      nm_yale_storage_unregister_a(new_a, new_cap);\n    }\n\n    s->ija      = new_ija;\n    s->a        = 
reinterpret_cast<void*>(new_a);\n  }\n\n\n  /*\n   * Move elements in the IJA and A arrays by n (to the right).\n   * Does not update row sizes.\n   */\n  void move_right(row_stored_nd_iterator position, size_t n) {\n    size_t sz = size();\n    for (size_t m = 0; m < sz - position.p(); ++m) {\n      ija(sz+n-1-m) = ija(sz-1-m);\n      a(sz+n-1-m)   = a(sz-1-m);\n    }\n  }\n\n  /*\n   * Move elements in the IJA and A arrays by n (to the left). Here position gives\n   * the location to move to, and they should come from n to the right.\n   */\n  void move_left(row_stored_nd_iterator position, size_t n) {\n    size_t sz = size();\n    for (size_t m = position.p() + n; m < sz; ++m) {   // work backwards\n      ija(m-n)      = ija(m);\n      a(m-n)        = a(m);\n    }\n  }\n\n  YALE_STORAGE* s;\n  bool          slice;\n  size_t*       slice_shape;\n  size_t*       slice_offset;\n};\n\n} // end of nm namespace\n\n#endif // YALE_CLASS_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/iterators/base.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == base.h\n//\n// Yale storage pure virtual basic_iterator class.\n//\n\n#ifndef YALE_ITERATORS_BASE_H\n# define YALE_ITERATORS_BASE_H\n\n#include <ruby.h>\n#include <type_traits>\n#include <typeinfo>\n#include <stdexcept>\n\nnamespace nm {\n\ntemplate <typename D> class YaleStorage;\n\nnamespace yale_storage {\n\ntemplate <typename D>\nVALUE nm_rb_dereference(D const& v) {\n  return nm::RubyObject(v).rval;\n}\n\ntemplate <>\nVALUE nm_rb_dereference<nm::RubyObject>(nm::RubyObject const& v) {\n  return v.rval;\n}\n\n/*\n * Iterator base class (pure virtual).\n */\ntemplate <typename D,\n          typename RefType,\n          typename YaleRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const nm::YaleStorage<D>,\n            nm::YaleStorage<D>\n          >::type>\nclass basic_iterator_T {\n\nprotected:\n  YaleRef& y;\n  size_t i_;\n  size_t p_;\n\npublic:\n  size_t offset(size_t d) const { return y.offset(d); }\n  size_t shape(size_t d) const { return y.shape(d); }\n  size_t real_shape(size_t d) const { return y.real_shape(d); }\n\n  size_t dense_location() const {\n    return i()*shape(1) + j();\n  }\n\n  template <typename T = typename 
std::conditional<std::is_const<RefType>::value, const size_t, size_t>::type>\n  T& ija(size_t pp) const { return y.ija(pp); }\n\n  template <typename T = typename std::conditional<std::is_const<RefType>::value, const size_t, size_t>::type>\n  T& ija(size_t pp) { return y.ija(pp); }\n\n  virtual bool diag() const {\n    return p_ < std::min(y.real_shape(0), y.real_shape(1));\n  }\n  virtual bool done_with_diag() const {\n    return p_ == std::min(y.real_shape(0), y.real_shape(1));\n  }\n  virtual bool nondiag() const {\n    return p_ > std::min(y.real_shape(0), y.real_shape(1));\n  }\n\n  basic_iterator_T(YaleRef& obj, size_t ii = 0, size_t pp = 0) : y(obj), i_(ii), p_(pp) { }\n\n  basic_iterator_T<D,RefType,YaleRef>& operator=(const basic_iterator_T<D,RefType,YaleRef>& rhs) {\n    if (&y != &(rhs.y)) throw std::logic_error(\"can only be used on iterators with the same matrix\");\n    i_ = rhs.i_;\n    p_ = rhs.p_;\n    return *this;\n  }\n\n  virtual inline size_t i() const { return i_; }\n  virtual size_t j() const = 0;\n\n  virtual inline VALUE rb_i() const { return LONG2NUM(i()); }\n  virtual inline VALUE rb_j() const { return LONG2NUM(j()); }\n\n  virtual size_t real_i() const { return offset(0) + i(); }\n  virtual size_t real_j() const { return offset(1) + j(); }\n  virtual size_t p() const { return p_; }\n  virtual bool real_ndnz_exists() const { return !y.real_row_empty(real_i()) && ija(p_) == real_j(); }\n\n  virtual RefType& operator*() = 0;\n  virtual RefType& operator*() const = 0;\n\n\n  // Ruby VALUE de-reference\n  inline VALUE operator~() const {\n    return nm_rb_dereference<D>(**this);\n  //virtual VALUE operator~() const {\n  //  if (typeid(D) == typeid(RubyObject)) return (**this); // FIXME: return rval instead, faster;\n  //  else return RubyObject(*(*this)).rval;\n  }\n\n  virtual bool operator==(const std::pair<size_t,size_t>& ij) {\n    if (p() >= ija(real_shape(0))) return false;\n    else return i() == ij.first && j() == ij.second;\n  }\n\n 
 virtual bool operator==(const basic_iterator_T<D,RefType,YaleRef>& rhs) const {\n    return i() == rhs.i() && j() == rhs.j();\n  }\n  virtual bool operator!=(const basic_iterator_T<D,RefType,YaleRef>& rhs) const {\n    return i() != rhs.i() || j() != rhs.j();\n  }\n};\n\n\n} } // end of namespace nm::yale_storage\n\n#endif // YALE_ITERATORS_BASE_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/iterators/iterator.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == iterator.h\n//\n// Iterate over yale as if dense\n//\n\n#ifndef YALE_ITERATORS_ITERATOR_H\n# define YALE_ITERATORS_ITERATOR_H\n\n#include <ruby.h>\n#include <type_traits>\n#include <typeinfo>\n\nnamespace nm { namespace yale_storage {\n\n/*\n * Iterator for traversing matrix class as if it were dense (visits each entry in order).\n */\ntemplate <typename D,\n          typename RefType,\n          typename YaleRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const nm::YaleStorage<D>,\n            nm::YaleStorage<D>\n          >::type>\nclass iterator_T : public basic_iterator_T<D,RefType,YaleRef> {\n  using basic_iterator_T<D,RefType,YaleRef>::i_;\n  using basic_iterator_T<D,RefType,YaleRef>::p_;\n  using basic_iterator_T<D,RefType,YaleRef>::y;\n  using basic_iterator_T<D,RefType,YaleRef>::offset;\n  using basic_iterator_T<D,RefType,YaleRef>::shape;\n  using basic_iterator_T<D,RefType,YaleRef>::ija;\n\nprotected:\n  size_t j_; // These are relative to the slice.\n\npublic:\n  // Create an iterator. 
May select the row since this is O(1).\n  iterator_T(YaleRef& obj, size_t ii = 0)\n  : basic_iterator_T<D,RefType,YaleRef>(obj, ii, obj.ija(ii + obj.offset(0))), j_(0)\n  {\n    // advance to the beginning of the row\n    if (obj.offset(1) > 0)\n      p_ = y.find_pos_for_insertion(i_,j_);\n  }\n\n  // Prefix ++\n  iterator_T<D,RefType,YaleRef>& operator++() {\n    size_t prev_j = j_++;\n    if (j_ >= shape(1)) {\n      j_ = 0;\n      ++i_;\n\n      // Do a binary search to find the beginning of the slice\n      p_ = offset(0) > 0 ? y.find_pos_for_insertion(i_,j_) : ija(i_);\n    } else {\n      // If the last j was actually stored in this row of the matrix, need to advance p.\n\n      if (!y.real_row_empty(i_ + offset(0)) && ija(p_) <= prev_j + offset(1)) ++p_;  // this test is the same as real_ndnz_exists\n    }\n\n    return *this;\n  }\n\n  iterator_T<D,RefType,YaleRef> operator++(int dummy) const {\n    iterator_T<D,RefType,YaleRef> iter(*this);\n    return ++iter;\n  }\n\n  virtual bool operator!=(const iterator_T<D,RefType,YaleRef>& rhs) const {\n    return this->dense_location() != rhs.dense_location();\n  }\n\n  virtual bool operator==(const iterator_T<D,RefType,YaleRef>& rhs) const {\n    return this->dense_location() == rhs.dense_location();\n  }\n\n  bool operator<(const iterator_T<D,RefType,YaleRef>& rhs) const {\n    return this->dense_location() < rhs.dense_location();\n  }\n\n  bool operator>(const iterator_T<D,RefType,YaleRef>& rhs) const {\n    return this->dense_location() > rhs.dense_location();\n  }\n\n  virtual bool diag() const { return i_ + offset(0) == j_ + offset(1); }\n\n  // De-reference\n  RefType& operator*() {\n    if (diag())                                                                return y.a( i_ + offset(0) );\n    else if (p_ >= ija(i_+offset(0)+1))                                        return y.const_default_obj();\n    else if (!y.real_row_empty(i_ + offset(0)) && ija(p_) == j_ + offset(1))   return y.a( p_ );\n    else     
                                                                  return y.const_default_obj();\n  }\n\n  RefType& operator*() const {\n    if (diag())                                                                return y.a( i_ + offset(0) );\n    else if (p_ >= ija(i_+offset(0)+1))                                        return y.const_default_obj();\n    else if (!y.real_row_empty(i_ + offset(0)) && ija(p_) == j_ + offset(1))   return y.a( p_ );\n    else                                                                       return y.const_default_obj();\n  }\n\n  virtual size_t j() const { return j_; }\n};\n\n\n} } // end of namespace nm::yale_storage\n\n#endif // YALE_ITERATORS_ITERATOR_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/iterators/row.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == row.h\n//\n// Iterator for traversing a matrix row by row. Includes an\n// orthogonal iterator for visiting each stored entry in a row.\n// This one cannot be de-referenced; you have to de-reference\n// the column.\n\n#ifndef YALE_ITERATORS_ROW_H\n# define YALE_ITERATORS_ROW_H\n\n#include <ruby.h>\n#include <stdexcept>\n\nnamespace nm { namespace yale_storage {\n\ntemplate <typename D,\n          typename RefType,\n          typename YaleRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const nm::YaleStorage<D>,\n            nm::YaleStorage<D>\n          >::type>\nclass row_iterator_T {\n\nprotected:\n  YaleRef& y;\n  size_t i_;\n  size_t p_first, p_last; // first and last IJA positions in the row\n\n\n  /*\n   * Update the row positions -- use to ensure a row stays valid after an insert operation. 
Also\n   * used to initialize a row iterator at a different row index.\n   */\n  void update() {\n    if (i_ < y.shape(0)) {\n      p_first = p_real_first();\n      p_last  = p_real_last();\n      if (!nd_empty()) {\n        // try to find new p_first\n        p_first = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1));\n        if (!nd_empty()) {\n          // also try to find new p_last\n          p_last = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1) + y.shape(1) - 1);\n          if (y.ija(p_last) - y.offset(1) >= shape(1)) --p_last; // searched too far.\n        }\n      }\n    } else { // invalid row -- this is an end iterator.\n      p_first = y.ija(y.real_shape(0));\n      p_last  = y.ija(y.real_shape(0))-1; // mark as empty\n    }\n  }\n\n  /*\n   * Indicate to the row iterator that p_first and p_last have moved by some amount. Only\n   * defined for row_iterator, not const_row_iterator. This is a lightweight form of update().\n   */\n  //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>\n  void shift(int amount) {\n    p_first += amount;\n    p_last  += amount;\n  }\n\n\n  /*\n   * Enlarge the row by amount by moving p_last over. 
This is a lightweight form of update().\n   */\n  //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>\n  void adjust_length(int amount) {\n    p_last  += amount;\n  }\n\npublic:\n/*  typedef row_stored_iterator_T<D,RefType,YaleRef>                  row_stored_iterator;\n  typedef row_stored_nd_iterator_T<D,RefType,YaleRef>               row_stored_nd_iterator;\n  typedef row_stored_iterator_T<D,const RefType,const YaleRef>      const_row_stored_iterator;\n  typedef row_stored_nd_iterator_T<D,const RefType,const YaleRef>   const_row_stored_nd_iterator;*/\n  typedef row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_iterator;\n  typedef row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_nd_iterator;\n  template <typename E, typename ERefType, typename EYaleRef> friend class row_iterator_T;\n  friend class row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;\n  friend class row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;//row_stored_iterator;\n  friend class row_stored_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;\n  friend class row_stored_nd_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;//row_stored_iterator;\n  friend class nm::YaleStorage<D>;\n\n  //friend row_stored_nd_iterator;\n\n  inline size_t ija(size_t pp) const { return y.ija(pp); }\n  inline size_t& ija(size_t pp)      { return y.ija(pp); }\n  inline RefType& a(size_t p) const  { return y.a_p()[p]; }\n  inline RefType& a(size_t p)        { return y.a_p()[p]; }\n\n\n\n  row_iterator_T(YaleRef& obj, size_t ii = 0)\n  : y(obj), i_(ii)\n  {\n    update();\n  }\n\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator!=(const row_iterator_T<E,ERefType>& rhs) const {\n    return i_ != rhs.i_;\n  }\n\n  template 
<typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator==(const row_iterator_T<E,ERefType>& rhs) const {\n    return i_ == rhs.i_;\n  }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator<(const row_iterator_T<E,ERefType>& rhs) const {\n    return i_ < rhs.i_;\n  }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator>(const row_iterator_T<E,ERefType>& rhs) const {\n    return i_ > rhs.i_;\n  }\n\n  row_iterator_T<D,RefType,YaleRef>& operator++() {\n    if (is_end()) throw std::out_of_range(\"attempted to iterate past end of slice (vertically)\");\n    ++i_;\n    update();\n    return *this;\n  }\n\n  row_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {\n    row_iterator_T<D,RefType,YaleRef> next(*this);\n    return ++next;\n  }\n\n  bool is_end() const {\n    return i_ == y.shape(0) && p_first == y.ija(y.real_shape(0));\n  }\n\n  size_t real_i() const {\n    return i_ + y.offset(0);\n  }\n\n  size_t i() const {\n    return i_;\n  }\n\n  // last element of the real row\n  size_t p_real_last() const {\n    return y.ija(real_i()+1)-1;\n  }\n\n  // first element of the real row\n  size_t p_real_first() const {\n    return y.ija(real_i());\n  }\n\n  // Is the real row of the original matrix totally empty of NDs?\n  bool real_nd_empty() const {\n    return p_real_last() < p_real_first();\n  }\n\n  bool nd_empty() const {\n    return p_last < p_first;\n  }\n\n  // slice j coord of the diag.\n  size_t diag_j() const {\n    if (!has_diag())\n      throw std::out_of_range(\"don't call diag_j unless you've checked for one\");\n    return real_i() - y.offset(1);\n  }\n\n  // return the actual position of the diagonal element for this real row, regardless of whether\n  // it's in range or not.\n  size_t p_diag() const {\n    
return real_i();\n  }\n\n  // Checks to see if there is a diagonal within the slice\n  bool has_diag() const {\n    // real position of diag is real_i == real_j. Is it in range?\n    return (p_diag() >= y.offset(1) && p_diag() - y.offset(1) < y.shape(1));\n  }\n\n  // Checks to see if the diagonal is the first entry in the slice.\n  bool is_diag_first() const {\n    if (!has_diag()) return false;\n    if (nd_empty())  return true;\n    return diag_j() < y.ija(p_first) - y.offset(1);\n  }\n\n  // Checks to see if the diagonal is the last entry in the slice.\n  bool is_diag_last() const {\n    if (!has_diag()) return false;\n    if (nd_empty())  return true;\n    return diag_j() > y.ija(p_last);\n  }\n\n  // Is the row of the slice totally empty of NDs and Ds?\n  // We can only determine that it's empty of Ds if the diagonal\n  // is not a part of the sliced portion of the row.\n  bool empty() const {\n    return nd_empty() && has_diag();\n  }\n\n\n  size_t shape(size_t pp) const {\n    return y.shape(pp);\n  }\n\n  size_t offset(size_t pp) const {\n    return y.offset(pp);\n  }\n\n  inline VALUE rb_i() const { return LONG2NUM(i()); }\n\n  row_stored_iterator_T<D,RefType,YaleRef> begin() {  return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first);  }\n  row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() {  return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first);  }\n  row_stored_iterator_T<D,RefType,YaleRef> end() { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }\n  row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() {  return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }\n\n  row_stored_iterator_T<D,RefType,YaleRef> begin() const {  return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first);  }\n  row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() const {  return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first);  }\n  row_stored_iterator_T<D,RefType,YaleRef> end() const { return 
row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }\n  row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() const {  return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }\n\n\n  row_stored_nd_iterator_T<D,RefType,YaleRef> lower_bound(const size_t& j) const {\n    row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, y.real_find_left_boundary_pos(p_first, p_last, y.offset(1)));\n  }\n\n  row_stored_nd_iterator_T<D,RefType,YaleRef> ndfind(size_t j) {\n    if (j == 0) return ndbegin();\n    size_t p = p_first > p_last ? p_first : y.real_find_left_boundary_pos(p_first, p_last, j + y.offset(1));\n    row_stored_nd_iterator iter = row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p);\n    return iter;\n  }\n\n  row_stored_iterator_T<D,RefType,YaleRef> find(size_t j) {\n    if (j == 0) return begin(); // may or may not be on the diagonal\n    else return row_stored_iterator_T<D,RefType,YaleRef>(*this, ndfind(j).p(), false); // is on the diagonal, definitely\n  }\n\n  /*\n   * Remove an entry from an already found non-diagonal position. Adjust this row appropriately so we can continue to\n   * use it.\n   */\n  //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>\n  row_stored_nd_iterator erase(row_stored_nd_iterator position) {\n    size_t sz = y.size();\n    if (sz - 1 <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT) {\n      y.update_resize_move(position, real_i(), -1);\n    } else {\n      y.move_left(position, 1);\n      y.update_real_row_sizes_from(real_i(), -1);\n    }\n    adjust_length(-1);\n    return row_stored_nd_iterator(*this, position.p()-1);\n  }\n\n  /*\n   * Remove an entry from the matrix at the already-located position. 
If diagonal, just sets to default; otherwise,\n   * actually removes the entry.\n   */\n  //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>\n  row_stored_nd_iterator erase(const row_stored_iterator& jt) {\n    if (jt.diag()) {\n      *jt = y.const_default_obj(); // diagonal is the easy case -- no movement.\n      return row_stored_nd_iterator(*this, jt.p());\n    } else {\n      return erase(row_stored_nd_iterator(*this, jt.p()));\n    }\n  }\n\n\n\n  //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>\n  row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, const D& val) {\n    size_t sz = y.size();\n    while (!position.end() && position.j() < jj) ++position; // position is just a hint. (This loop ideally only has to happen once.)\n\n    if (!position.end() && position.j() == jj) {\n      *position = val;      // replace existing\n    } else {\n\n      if (sz + 1 > y.capacity()) {\n        y.update_resize_move(position, real_i(), 1);\n      } else {\n        y.move_right(position, 1);\n        y.update_real_row_sizes_from(real_i(), 1);\n      }\n      ija(position.p()) = jj + y.offset(1);    // set column ID\n      a(position.p())   = val;\n      adjust_length(1);\n    }\n\n    return position++;\n  }\n\n\n  /*\n   * This version of insert doesn't return anything. Why, when the others do?\n   *\n   * Well, mainly because j here can be a diagonal entry. Most of the inserters return the *next* element following\n   * the insertion, but to do that, we have to create a row_stored_nd_iterator, which requires at least one binary\n   * search for the location following the diagonal (and as of the writing of this, two binary searches). There's no\n   * reason to do that when we never actually *use* the return value. 
So instead we just have void.\n   */\n  //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>\n  void insert(size_t j, const D& val) {\n    if (j + y.offset(1) == real_i())  a(real_i()) = val;\n    else {\n      row_stored_nd_iterator jt = ndfind(j);\n      if (!jt.end() && jt.j() == j) {\n        if (val == y.const_default_obj()) erase(jt);          // erase\n        else                              insert(jt, j, val); // replace\n      } else { // only insert if it's not the default\n        if (val != y.const_default_obj()) insert(jt, j, val);\n      }\n    }\n  }\n\n\n  /*\n   * Determines a plan for inserting a single row. Returns an integer giving the amount of the row change.\n   */\n  int single_row_insertion_plan(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {\n    int nd_change = 0;\n\n    for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {\n      if (v_offset >= v_size) v_offset %= v_size; // reset v position.\n\n      if (jc + y.offset(1) != real_i()) { // diagonal    -- no nd_change here\n        if (position.end()) {\n          if (v[v_offset] != y.const_default_obj()) nd_change++; // insert\n        } else if (position.j() != jc) { // not present -- do we need to add it?\n          if (v[v_offset] != y.const_default_obj()) nd_change++;\n        } else {  // position.j() == jc\n          if (v[v_offset] == y.const_default_obj()) nd_change--;\n          ++position; // move iterator forward.\n        }\n      }\n    }\n    return nd_change;\n  }\n\n  /*\n   * Determine a plan for inserting a single row -- finds the position first. Returns the position and\n   * the change amount. 
Don't use this one if you can help it because it requires a binary search of\n   * the row.\n   */\n  std::pair<int,size_t> single_row_insertion_plan(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {\n    std::pair<int,size_t> result;\n    row_stored_nd_iterator pos = ndfind(jj);\n    result.first = single_row_insertion_plan(pos, jj, length, v, v_size, v_offset);\n    result.second = pos.p();\n    return result;\n  }\n\n  /*\n   * Insert elements into a single row. Returns an iterator to the end of the insertion range.\n   */\n  row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {\n    size_t tmp_v_offset = v_offset;\n    int nd_change = single_row_insertion_plan(position, jj, length, v, v_size, tmp_v_offset);\n\n    // First record the position, just in case our iterator becomes invalid.\n    size_t pp = position.p();\n\n    // Resize the array as necessary, or move entries after the insertion point to make room.\n    size_t sz = y.size();\n    if (sz + nd_change > y.capacity() || sz + nd_change <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT)\n      y.update_resize_move(position, real_i(), nd_change);\n    else if (nd_change != 0) {\n      if (nd_change < 0)       y.move_left(position, -nd_change);\n      else if (nd_change > 0)  y.move_right(position, nd_change);\n      y.update_real_row_sizes_from(real_i(), nd_change);\n    }\n\n    for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {\n      if (v_offset >= v_size) v_offset %= v_size; // reset v position.\n\n      if (jc + y.offset(1) == real_i()) {\n        y.a(real_i())   = v[v_offset];  // modify diagonal\n      } else if (v[v_offset] != y.const_default_obj()) {\n        y.ija(pp)       = jc;           // modify non-diagonal\n        y.a(pp)         = v[v_offset];\n        ++pp;\n      }\n    }\n\n    // Update this row.\n    adjust_length(nd_change);\n\n    return row_stored_nd_iterator(*this, 
pp);\n  }\n\n  /*\n   * For when we don't need to worry about the offset, does the same thing as the insert above.\n   */\n  row_stored_nd_iterator insert(const row_stored_nd_iterator& position, size_t jj, size_t length, D const* v, size_t v_size) {\n    size_t v_offset = 0;\n    return insert(position, jj, length, v, v_size, v_offset);\n  }\n\n\n  /*\n   * Merges elements offered for insertion with existing elements in the row.\n   */\n  row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {\n    return insert(ndfind(jj), jj, length, v, v_size, v_offset);\n  }\n\n  /*\n   * Merges elements offered for insertion with existing elements in the row.\n   */\n  row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size) {\n    size_t v_offset = 0;\n    return insert(ndfind(jj), jj, length, v, v_size, v_offset);\n  }\n\n\n};\n\n} } // end of nm::yale_storage namespace\n\n#endif // YALE_ITERATORS_ROW_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/iterators/row_stored.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == row_stored.h\n//\n// Iterator for traversing a single stored row of a matrix (needed\n// for row.h). FIXME: This is not as efficient as it could be; it uses\n// two binary searches to find the beginning and end of each slice.\n// The end search shouldn't be necessary, but I couldn't make it\n// work without it, and eventually decided my dissertation should\n// be a priority.\n//\n\n#ifndef YALE_ITERATORS_ROW_STORED_H\n# define YALE_ITERATORS_ROW_STORED_H\n\n#include <ruby.h>\n#include <stdexcept>\n\nnamespace nm { namespace yale_storage {\n\n\n/*\n * Iterator for visiting each stored element in a row, including diagonals.\n */\ntemplate <typename D,\n          typename RefType,\n          typename YaleRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const nm::YaleStorage<D>,\n            nm::YaleStorage<D>\n          >::type,\n          typename RowRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const row_iterator_T<D,RefType,YaleRef>,\n            row_iterator_T<D,RefType,YaleRef>\n          >::type>\nclass row_stored_iterator_T : public row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> {\nprotected:\n  using 
row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::r;\n  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::p_;\n  bool d_visited, d;\n\npublic:\n\n  // end_ is necessary for the logic when a row is empty other than the diagonal. If we just\n  // relied on pp == last_p+1, it'd look like these empty rows were actually end() iterators.\n  // So we have to actually mark end_ by telling it to ignore that diagonal visitation.\n  row_stored_iterator_T(RowRef& row, size_t pp, bool end_ = false)\n  : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row, pp),\n    d_visited(!row.has_diag()), // if the row has no diagonal, just marked it as visited.\n    d(r.is_diag_first() && !end_)        // do we start at the diagonal?\n  {\n  }\n\n  /* Diagonal constructor. Puts us on the diagonal (unless end is true) */\n  /*row_stored_iterator_T(RowRef& row, bool end_, size_t j)\n  : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row.ndfind(j)),\n    d_visited(false),\n    d(!end_ && j + row.offset(1) == row.real_i())\n  { }*/\n\n  virtual bool diag() const {\n    return d;\n  }\n\n  virtual bool end() const {\n    return !d && p_ > r.p_last;\n  }\n\n  row_stored_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {\n    if (end()) throw std::out_of_range(\"cannot increment row stored iterator past end of stored row\");\n    if (d) {\n      d_visited = true;\n      d         = false;\n    } else {\n      ++p_;\n      // Are we at a diagonal?\n      // If we hit the end or reach a point where j > diag_j, and still\n      // haven't visited the diagonal, we should do so before continuing.\n      if (!d_visited && (end() || j() > r.diag_j())) {\n        d = true;\n      }\n    }\n\n    return *this;\n  }\n\n  row_stored_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {\n    row_stored_iterator_T<D,RefType,YaleRef,RowRef> r(*this);\n    return ++r;\n  }\n\n  size_t j() const {\n    if (end()) throw std::out_of_range(\"cannot dereference an end pointer\");\n    
return (d ? r.p_diag() : r.ija(p_)) - r.offset(1);\n  }\n\n  // Need to declare all row_stored_iterator_T friends of each other.\n  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_iterator_T;\n\n  // De-reference the iterator\n  RefType& operator*()       {\n    return d ? r.a(r.p_diag()) : r.a(p_);\n  }\n\n  RefType& operator*() const {\n    return d ? r.a(r.p_diag()) : r.a(p_);\n  }\n\n  // Ruby VALUE de-reference\n  VALUE operator~() const {\n    return nm_rb_dereference<D>(**this);\n  }\n\n};\n\n}} // end of namespace nm::yale_storage\n\n#endif // YALE_ITERATORS_ROW_STORED_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/iterators/row_stored_nd.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == row_stored_nd.h\n//\n// Yale storage row-by-row nondiagonal-storage iterator\n//\n\n#ifndef YALE_ITERATORS_ROW_STORED_ND_H\n# define YALE_ITERATORS_ROW_STORED_ND_H\n\n#include <ruby.h>\n#include <type_traits>\n#include <typeinfo>\n#include <stdexcept>\n\nnamespace nm { namespace yale_storage {\n\n/*\n * Constants\n */\nconst float GROWTH_CONSTANT = 1.5;\n\n\n/*\n * Forward declarations\n */\ntemplate <typename D, typename RefType, typename YaleRef> class row_iterator_T;\n\n/*\n * Iterator for visiting each stored element in a row, including diagonals.\n */\ntemplate <typename D,\n          typename RefType,\n          typename YaleRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const nm::YaleStorage<D>,\n            nm::YaleStorage<D>\n          >::type,\n          typename RowRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const row_iterator_T<D,RefType,YaleRef>,\n            row_iterator_T<D,RefType,YaleRef>\n          >::type>\nclass row_stored_nd_iterator_T {\nprotected:\n  RowRef& r;\n  size_t p_;\n\npublic:\n\n  row_stored_nd_iterator_T(RowRef& row, size_t pp)\n  : r(row),\n    p_(pp)        // do we start 
at the diagonal?\n  {\n  }\n\n  // DO NOT IMPLEMENT THESE FUNCTIONS. They prevent C++ virtual slicing\n  //template <typename T> row_stored_nd_iterator_T(T const& rhs);\n  //template <typename T> row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(T const& rhs);\n\n  // Next two functions are to ensure we can still cast between nd iterators.\n  row_stored_nd_iterator_T(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs)\n  : r(rhs.r), p_(rhs.p_)\n  { }\n\n  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs) {\n    if (&r != &(rhs.r))\n      throw std::logic_error(\"can't assign iterator from another row iterator\");\n    p_ = rhs.p_;\n    return *this;\n  }\n\n  virtual size_t p() const { return p_; }\n\n  virtual bool end() const {\n    return p_ > r.p_last;\n  }\n\n  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {\n    if (end()) throw std::out_of_range(\"cannot increment row stored iterator past end of stored row\");\n    ++p_;\n\n    return *this;\n  }\n\n  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {\n    row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> r(*this);\n    return ++r;\n  }\n\n  virtual size_t j() const {\n    if (end()) throw std::out_of_range(\"cannot dereference (get j()) for an end pointer\");\n    return r.ija(p_) - r.offset(1);\n  }\n\n  // Need to declare all row_stored_nd_iterator_T friends of each other.\n  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_nd_iterator_T;\n\n\n  virtual bool operator==(const row_stored_nd_iterator_T<D,RefType>& rhs) const {\n    if (r.i() != rhs.r.i())     return false;\n    if (end())                  return rhs.end();\n    else if (rhs.end())         return false;\n    return j() == rhs.j();\n  }\n\n  // There is something wrong with this function.\n  virtual bool operator!=(const 
row_stored_nd_iterator_T<D,RefType>& rhs) const {\n    if (r.i() != rhs.r.i()) return true;\n    if (end())              return !rhs.end();\n    else if (rhs.end())     return true;\n    return j() != rhs.j();\n  }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator<(const row_stored_nd_iterator_T<E,ERefType>& rhs) const {\n    if (r < rhs.r)      return true;\n    if (r > rhs.r)      return false;\n\n    // r == rhs.r\n    if (end())        return false;\n    if (rhs.end())    return true;\n    return j() < rhs.j();\n  }\n\n  // De-reference the iterator\n  RefType& operator*()       {\n    return r.a(p_);\n  }\n\n  RefType& operator*() const {\n    return r.a(p_);\n  }\n\n  // Ruby VALUE de-reference\n  VALUE operator~() const {\n    return nm_rb_dereference<D>(**this);\n  }\n\n  inline virtual VALUE rb_j() const { return LONG2NUM(j()); }\n\n};\n\n\n\n} } // end of namespace nm::yale_storage\n\n#endif // YALE_ITERATORS_ROW_STORED_ND_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/iterators/stored_diagonal.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == stored_diagonal_iterator.h\n//\n// Yale storage diagonal-storage iterator\n//\n\n#ifndef YALE_ITERATORS_STORED_DIAGONAL_H\n# define YALE_ITERATORS_STORED_DIAGONAL_H\n\n#include <ruby.h>\n#include <type_traits>\n#include <typeinfo>\n\nnamespace nm { namespace yale_storage {\n\n/*\n * Iterate across the stored diagonal.\n */\ntemplate <typename D,\n          typename RefType,\n          typename YaleRef = typename std::conditional<\n            std::is_const<RefType>::value,\n            const nm::YaleStorage<D>,\n            nm::YaleStorage<D>\n          >::type>\nclass stored_diagonal_iterator_T : public basic_iterator_T<D,RefType,YaleRef> {\n  using basic_iterator_T<D,RefType,YaleRef>::p_;\n  using basic_iterator_T<D,RefType,YaleRef>::y;\n  using basic_iterator_T<D,RefType,YaleRef>::offset;\n  using basic_iterator_T<D,RefType,YaleRef>::shape;\npublic:\n  stored_diagonal_iterator_T(YaleRef& obj, size_t d = 0)\n  : basic_iterator_T<D,RefType,YaleRef>(obj,                // y\n                   std::max(obj.offset(0), obj.offset(1)) + d - obj.offset(0), // i_\n                   std::max(obj.offset(0), obj.offset(1)) + d) // p_\n  {\n//      std::cerr << \"sdbegin: d=\" << d << \", p_=\" << p_ << 
\", i()=\" << i() << \", j()=\" << j() << std::endl;\n    // p_ can range from max(y.offset(0), y.offset(1)) to min(y.real_shape(0), y.real_shape(1))\n  }\n\n\n  size_t d() const {\n    return p_ - std::max(offset(0), offset(1));\n  }\n\n  stored_diagonal_iterator_T<D,RefType,YaleRef>& operator++() {\n    if (i() < shape(0)) ++p_;\n    return *this;\n  }\n\n  stored_diagonal_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {\n    stored_diagonal_iterator_T<D,RefType,YaleRef> iter(*this);\n    return ++iter;\n  }\n\n  // Indicates if we're at the end of the iteration.\n  bool end() const {\n    return p_ >= std::min( shape(0) + offset(0), shape(1) + offset(1) );\n  }\n\n  // i() and j() are how we know if we're past-the-end. i will be shape(0) and j will be 0.\n  size_t i() const {\n    return p_ - offset(0);\n  }\n\n  size_t j() const {\n    return p_ - offset(1);\n  }\n\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator!=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() != rhs.d(); }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator==(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return !(*this != rhs); }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator<(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {  return d() < rhs.d(); }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator<=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {\n    return d() <= rhs.d();\n  }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator>(const stored_diagonal_iterator_T<E,ERefType>& rhs) 
const {\n    return d() > rhs.d();\n  }\n\n  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>\n  bool operator>=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {\n    return d() >= rhs.d();\n  }\n\n  RefType& operator*() { return y.a(p_); }\n  RefType& operator*() const { return y.a(p_); }\n\n};\n\n} } // end of namespace nm::yale_storage\n\n#endif // YALE_ITERATORS_STORED_DIAGONAL_H\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/math/transpose.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == transpose.h\n//\n// Functions for Yale math: transposing\n//\n\n#ifndef YALE_MATH_TRANSPOSE_H\n# define YALE_MATH_TRANSPOSE_H\n\nnamespace nm { namespace yale_storage {\n\n/*\n * Transposes a generic Yale matrix (old or new). 
Specify new by setting RDiag = true.\n *\n * Based on transp from SMMP (same as symbmm and numbmm).\n *\n * This is not named in the same way as most yale_storage functions because it does not act on a YALE_STORAGE\n * object.\n */\n\ntemplate <typename AD, typename BD, bool DiagA, bool Move>\nvoid transpose_yale(const size_t n, const size_t m,\n                    const size_t* ia, const size_t* ja, const AD* a, const AD& a_default,\n                    size_t* ib, size_t* jb, BD* b, const BD& b_default) {\n\n  size_t index;\n\n  // Clear B\n  for (size_t i = 0; i < m+1; ++i) ib[i] = 0;\n\n  if (Move)\n    for (size_t i = 0; i < m+1; ++i) b[i] = b_default;\n\n  if (DiagA) ib[0] = m + 1;\n  else       ib[0] = 0;\n\n  /* count indices for each column */\n\n  for (size_t i = 0; i < n; ++i) {\n    for (size_t j = ia[i]; j < ia[i+1]; ++j) {\n      ++(ib[ja[j]+1]);\n    }\n  }\n\n  for (size_t i = 0; i < m; ++i) {\n    ib[i+1] = ib[i] + ib[i+1];\n  }\n\n  /* now make jb */\n\n  for (size_t i = 0; i < n; ++i) {\n\n    for (size_t j = ia[i]; j < ia[i+1]; ++j) {\n      index = ja[j];\n      jb[ib[index]] = i;\n\n      if (Move && a[j] != a_default)\n        b[ib[index]] = a[j];\n\n      ++(ib[index]);\n    }\n  }\n\n  /* now fixup ib */\n\n  for (size_t i = m; i >= 1; --i) {\n    ib[i] = ib[i-1];\n  }\n\n\n  if (DiagA) {\n    if (Move) {\n      size_t j = std::min(n,m);\n\n      for (size_t i = 0; i < j; ++i) {\n        b[i] = a[i];\n      }\n    }\n    ib[0] = m + 1;\n\n  } else {\n    ib[0] = 0;\n  }\n}\n\n} } // end of namespace nm::yale_storage\n\n#endif"
  },
  {
    "path": "ext/nmatrix/storage/yale/yale.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == yale.c\n//\n// \"new yale\" storage format for 2D matrices (like yale, but with\n// the diagonal pulled out for O(1) access).\n//\n// Specifications:\n// * dtype and index dtype must necessarily differ\n//      * index dtype is defined by whatever unsigned type can store\n//        max(rows,cols)\n//      * that means vector ija stores only index dtype, but a stores\n//        dtype\n// * vectors must be able to grow as necessary\n//      * maximum size is rows*cols+1\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <algorithm>  // std::min\n#include <cstdio>     // std::fprintf\n#include <iostream>\n#include <array>\n#include <typeinfo>\n#include <tuple>\n#include <queue>\n\n/*\n * Project Includes\n */\n\n// #include \"types.h\"\n#include \"../../data/data.h\"\n#include \"../../math/math.h\"\n\n#include \"../common.h\"\n\n#include \"../../nmatrix.h\"\n#include \"../../data/meta.h\"\n\n#include \"iterators/base.h\"\n#include \"iterators/stored_diagonal.h\"\n#include \"iterators/row_stored_nd.h\"\n#include \"iterators/row_stored.h\"\n#include \"iterators/row.h\"\n#include \"iterators/iterator.h\"\n#include \"class.h\"\n#include \"yale.h\"\n#include 
\"../../ruby_constants.h\"\n\n/*\n * Macros\n */\n\n#ifndef NM_MAX\n#define NM_MAX(a,b) (((a)>(b))?(a):(b))\n#define NM_MIN(a,b) (((a)<(b))?(a):(b))\n#endif\n\n/*\n * Forward Declarations\n */\n\nextern \"C\" {\n  static YALE_STORAGE*  alloc(nm::dtype_t dtype, size_t* shape, size_t dim);\n\n  static size_t yale_count_slice_copy_ndnz(const YALE_STORAGE* s, size_t*, size_t*);\n\n  static void* default_value_ptr(const YALE_STORAGE* s);\n  static VALUE default_value(const YALE_STORAGE* s);\n  static VALUE obj_at(YALE_STORAGE* s, size_t k);\n\n  /* Ruby-accessible functions */\n  static VALUE nm_size(VALUE self);\n  static VALUE nm_a(int argc, VALUE* argv, VALUE self);\n  static VALUE nm_d(int argc, VALUE* argv, VALUE self);\n  static VALUE nm_lu(VALUE self);\n  static VALUE nm_ia(VALUE self);\n  static VALUE nm_ja(VALUE self);\n  static VALUE nm_ija(int argc, VALUE* argv, VALUE self);\n  static VALUE nm_row_keys_intersection(VALUE m1, VALUE ii1, VALUE m2, VALUE ii2);\n\n  static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self);\n\n  static inline size_t src_ndnz(const YALE_STORAGE* s) {\n    return reinterpret_cast<YALE_STORAGE*>(s->src)->ndnz;\n  }\n\n} // end extern \"C\" block\n\nnamespace nm { namespace yale_storage {\n\ntemplate <typename LD, typename RD>\nstatic VALUE map_merged_stored(VALUE left, VALUE right, VALUE init);\n\ntemplate <typename DType>\nstatic bool            ndrow_is_empty(const YALE_STORAGE* s, IType ija, const IType ija_next);\n\ntemplate <typename LDType, typename RDType>\nstatic bool            ndrow_eqeq_ndrow(const YALE_STORAGE* l, const YALE_STORAGE* r, IType l_ija, const IType l_ija_next, IType r_ija, const IType r_ija_next);\n\ntemplate <typename LDType, typename RDType>\nstatic bool           eqeq(const YALE_STORAGE* left, const YALE_STORAGE* right);\n\ntemplate <typename LDType, typename RDType>\nstatic bool eqeq_different_defaults(const YALE_STORAGE* s, const LDType& s_init, const YALE_STORAGE* t, const RDType& t_init);\n\nstatic 
void            increment_ia_after(YALE_STORAGE* s, IType ija_size, IType i, long n);\n\nstatic IType          insert_search(YALE_STORAGE* s, IType left, IType right, IType key, bool& found);\n\ntemplate <typename DType>\nstatic char           vector_insert(YALE_STORAGE* s, size_t pos, size_t* j, void* val_, size_t n, bool struct_only);\n\ntemplate <typename DType>\nstatic char           vector_insert_resize(YALE_STORAGE* s, size_t current_size, size_t pos, size_t* j, size_t n, bool struct_only);\n\ntemplate <typename DType>\nstatic std::tuple<long,bool,std::queue<std::tuple<IType,IType,int> > > count_slice_set_ndnz_change(YALE_STORAGE* s, size_t* coords, size_t* lengths, DType* v, size_t v_size);\n\nstatic inline IType* IJA(const YALE_STORAGE* s) {\n  return reinterpret_cast<YALE_STORAGE*>(s->src)->ija;\n}\n\nstatic inline IType IJA_SET(const YALE_STORAGE* s, size_t loc, IType val) {\n  return IJA(s)[loc] = val;\n}\n\ntemplate <typename DType>\nstatic inline DType* A(const YALE_STORAGE* s) {\n  return reinterpret_cast<DType*>(reinterpret_cast<YALE_STORAGE*>(s->src)->a);\n}\n\ntemplate <typename DType>\nstatic inline DType A_SET(const YALE_STORAGE* s, size_t loc, DType val) {\n  return A<DType>(s)[loc] = val;\n}\n\n\n/*\n * Functions\n */\n\n/*\n * Copy a vector from one DType to another.\n */\ntemplate <typename LType, typename RType>\nstatic inline void copy_recast_vector(const void* in_, void* out_, size_t length) {\n  const RType* in = reinterpret_cast<const RType*>(in_);\n  LType* out      = reinterpret_cast<LType*>(out_);\n  for (size_t i = 0; i < length; ++i) {\n    out[i] = in[i];\n  }\n  out;\n}\n\n\n\n/*\n * Create Yale storage from IA, JA, and A vectors given in Old Yale format (probably from a file, since NMatrix only uses\n * new Yale for its storage).\n *\n * This function is needed for Matlab .MAT v5 IO.\n */\ntemplate <typename LDType, typename RDType>\nYALE_STORAGE* create_from_old_yale(dtype_t dtype, size_t* shape, char* r_ia, char* r_ja, char* 
r_a) {\n  IType*  ir = reinterpret_cast<IType*>(r_ia);\n  IType*  jr = reinterpret_cast<IType*>(r_ja);\n  RDType* ar = reinterpret_cast<RDType*>(r_a);\n\n  // Read through ia and ja and figure out the ndnz (non-diagonal non-zeros) count.\n  size_t ndnz = 0, i, p, p_next;\n\n  for (i = 0; i < shape[0]; ++i) { // Walk down rows\n    for (p = ir[i], p_next = ir[i+1]; p < p_next; ++p) { // Now walk through columns\n\n      if (i != jr[p]) ++ndnz; // entry is non-diagonal and probably nonzero\n\n    }\n  }\n\n  // Having walked through the matrix, we now go about allocating the space for it.\n  YALE_STORAGE* s = alloc(dtype, shape, 2);\n\n  s->capacity = shape[0] + ndnz + 1;\n  s->ndnz     = ndnz;\n\n  // Setup IJA and A arrays\n  s->ija = NM_ALLOC_N( IType, s->capacity );\n  s->a   = NM_ALLOC_N( LDType, s->capacity );\n  IType* ijl    = reinterpret_cast<IType*>(s->ija);\n  LDType* al    = reinterpret_cast<LDType*>(s->a);\n\n  // set the diagonal to zero -- this prevents uninitialized values from popping up.\n  for (size_t index = 0; index < shape[0]; ++index) {\n    al[index] = 0;\n  }\n\n  // Figure out where to start writing JA in IJA:\n  size_t pp = s->shape[0]+1;\n\n  // Find beginning of first row\n  p = ir[0];\n\n  // Now fill the arrays\n  for (i = 0; i < s->shape[0]; ++i) {\n\n    // Set the beginning of the row (of output)\n    ijl[i] = pp;\n\n    // Now walk through columns, starting at end of row (of input)\n    for (size_t p_next = ir[i+1]; p < p_next; ++p, ++pp) {\n\n      if (i == jr[p]) { // diagonal\n\n        al[i] = ar[p];\n        --pp;\n\n      } else {          // nondiagonal\n\n        ijl[pp] = jr[p];\n        al[pp]  = ar[p];\n\n      }\n    }\n  }\n\n  ijl[i] = pp; // Set the end of the last row\n\n  // Set the zero position for our output matrix\n  al[i] = 0;\n\n  return s;\n}\n\n\n/*\n * Empty the matrix by initializing the IJA vector and setting the diagonal to 0.\n *\n * Called when most YALE_STORAGE objects are created.\n *\n * Can't go 
inside of class YaleStorage because YaleStorage creation requires that\n * IJA already be initialized.\n */\ntemplate <typename DType>\nvoid init(YALE_STORAGE* s, void* init_val) {\n  IType IA_INIT = s->shape[0] + 1;\n\n  IType* ija = reinterpret_cast<IType*>(s->ija);\n  // clear out IJA vector\n  for (IType i = 0; i < IA_INIT; ++i) {\n    ija[i] = IA_INIT; // set initial values for IJA\n  }\n\n  clear_diagonal_and_zero<DType>(s, init_val);\n}\n\n\ntemplate <typename LDType, typename RDType>\nstatic YALE_STORAGE* slice_copy(YALE_STORAGE* s) {\n  YaleStorage<RDType> y(s);\n  return y.template alloc_copy<LDType, false>();\n}\n\n\n/*\n * Template version of copy transposed. This could also, in theory, allow a map -- but transpose.h\n * would need to be updated.\n *\n * TODO: Update for slicing? Update for different dtype in and out? We can cast rather easily without\n * too much modification.\n */\ntemplate <typename D>\nYALE_STORAGE* copy_transposed(YALE_STORAGE* rhs) {\n  YaleStorage<D> y(rhs);\n  return y.template alloc_copy_transposed<D, false>();\n}\n\n\n///////////////\n// Accessors //\n///////////////\n\n\n/*\n * Determine the number of non-diagonal non-zeros in a not-yet-created copy of a slice or matrix.\n */\ntemplate <typename DType>\nstatic size_t count_slice_copy_ndnz(const YALE_STORAGE* s, size_t* offset, size_t* shape) {\n  IType* ija = s->ija;\n  DType* a   = reinterpret_cast<DType*>(s->a);\n\n  DType ZERO(*reinterpret_cast<DType*>(default_value_ptr(s)));\n\n  // Calc ndnz for the destination\n  size_t ndnz  = 0;\n  size_t i, j; // indexes of destination matrix\n  size_t k, l; // indexes of source matrix\n  for (i = 0; i < shape[0]; i++) {\n    k = i + offset[0];\n    for (j = 0; j < shape[1]; j++) {\n      l = j + offset[1];\n\n      if (j == i)  continue;\n\n      if (k == l) { // for diagonal element of source\n        if (a[k] != ZERO) ++ndnz;\n      } else { // for non-diagonal element\n        for (size_t c = ija[k]; c < ija[k+1]; c++) {\n        
  if (ija[c] == l) {\n            ++ndnz;\n            break;\n          }\n        }\n      }\n    }\n  }\n\n  return ndnz;\n}\n\n\n\n/*\n * Get a single element of a yale storage object\n */\ntemplate <typename DType>\nstatic void* get_single(YALE_STORAGE* storage, SLICE* slice) {\n  YaleStorage<DType> y(storage);\n  return reinterpret_cast<void*>(y.get_single_p(slice));\n}\n\n\n/*\n * Returns a reference-slice of a matrix.\n */\ntemplate <typename DType>\nYALE_STORAGE* ref(YALE_STORAGE* s, SLICE* slice) {\n  return YaleStorage<DType>(s).alloc_ref(slice);\n}\n\n\n/*\n * Attempt to set a cell or cells in a Yale matrix.\n */\ntemplate <typename DType>\nvoid set(VALUE left, SLICE* slice, VALUE right) {\n  YALE_STORAGE* storage = NM_STORAGE_YALE(left);\n  YaleStorage<DType> y(storage);\n  y.insert(slice, right);\n}\n\n///////////\n// Tests //\n///////////\n\n/*\n * Yale eql? -- for whole-matrix comparison returning a single value.\n */\ntemplate <typename LDType, typename RDType>\nstatic bool eqeq(const YALE_STORAGE* left, const YALE_STORAGE* right) {\n  return YaleStorage<LDType>(left) == YaleStorage<RDType>(right);\n}\n\n\n//////////\n// Math //\n//////////\n\n#define YALE_IA(s) (reinterpret_cast<IType*>(s->ija))\n#define YALE_IJ(s) (reinterpret_cast<IType*>(s->ija) + s->shape[0] + 1)\n#define YALE_COUNT(yale) (yale->ndnz + yale->shape[0])\n\n/////////////\n// Utility //\n/////////////\n\n\n/*\n * Binary search for finding the beginning of a slice. Returns the position of the first element which is larger than\n * bound.\n */\nIType binary_search_left_boundary(const YALE_STORAGE* s, IType left, IType right, IType bound) {\n  if (left > right) return -1;\n\n  IType* ija  = IJA(s);\n\n  if (ija[left] >= bound) return left; // shortcut\n\n  IType mid   = (left + right) / 2;\n  IType mid_j = ija[mid];\n\n  if (mid_j == bound)\n    return mid;\n  else if (mid_j > bound) { // eligible! 
don't exclude it.\n    return binary_search_left_boundary(s, left, mid, bound);\n  } else // (mid_j < bound)\n    return binary_search_left_boundary(s, mid + 1, right, bound);\n}\n\n\n/*\n * Binary search for returning stored values. Returns a non-negative position, or -1 for not found.\n */\nint binary_search(YALE_STORAGE* s, IType left, IType right, IType key) {\n  if (s->src != s) throw; // need to fix this quickly\n\n  if (left > right) return -1;\n\n  IType* ija = s->ija;\n\n  IType mid = (left + right)/2;\n  IType mid_j = ija[mid];\n\n  if (mid_j == key)\n    return mid;\n\n  else if (mid_j > key)\n    return binary_search(s, left, mid - 1, key);\n\n  else\n    return binary_search(s, mid + 1, right, key);\n}\n\n\n/*\n * Resize yale storage vectors A and IJA, copying values.\n */\nstatic void vector_grow(YALE_STORAGE* s) {\n  if (s != s->src) {\n    throw; // need to correct this quickly.\n  }\n  nm_yale_storage_register(s);\n  size_t new_capacity = s->capacity * GROWTH_CONSTANT;\n  size_t max_capacity = YaleStorage<uint8_t>::max_size(s->shape);\n\n  if (new_capacity > max_capacity) new_capacity = max_capacity;\n\n  IType* new_ija      = NM_ALLOC_N(IType, new_capacity);\n  void* new_a         = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype] * new_capacity);\n\n  IType* old_ija      = s->ija;\n  void* old_a         = s->a;\n\n  memcpy(new_ija, old_ija, s->capacity * sizeof(IType));\n  memcpy(new_a,   old_a,   s->capacity * DTYPE_SIZES[s->dtype]);\n\n  s->capacity         = new_capacity;\n\n  if (s->dtype == nm::RUBYOBJ)\n    nm_yale_storage_register_a(new_a, s->capacity * DTYPE_SIZES[s->dtype]);\n\n  NM_FREE(old_ija);\n  nm_yale_storage_unregister(s);\n  NM_FREE(old_a);\n  if (s->dtype == nm::RUBYOBJ)\n    nm_yale_storage_unregister_a(new_a, s->capacity * DTYPE_SIZES[s->dtype]);\n\n  s->ija         = new_ija;\n  s->a           = new_a;\n\n}\n\n\n/*\n * Resize yale storage vectors A and IJA in preparation for an insertion.\n */\ntemplate <typename DType>\nstatic char 
vector_insert_resize(YALE_STORAGE* s, size_t current_size, size_t pos, size_t* j, size_t n, bool struct_only) {\n  if (s != s->src) throw;\n\n  // Determine the new capacity for the IJA and A vectors.\n  size_t new_capacity = s->capacity * GROWTH_CONSTANT;\n  size_t max_capacity = YaleStorage<DType>::max_size(s->shape);\n\n  if (new_capacity > max_capacity) {\n    new_capacity = max_capacity;\n\n    if (current_size + n > max_capacity) rb_raise(rb_eNoMemError, \"insertion size exceeded maximum yale matrix size\");\n  }\n\n  if (new_capacity < current_size + n)\n    new_capacity = current_size + n;\n\n  nm_yale_storage_register(s);\n\n  // Allocate the new vectors.\n  IType* new_ija     = NM_ALLOC_N( IType, new_capacity );\n  NM_CHECK_ALLOC(new_ija);\n\n  DType* new_a       = NM_ALLOC_N( DType, new_capacity );\n  NM_CHECK_ALLOC(new_a);\n\n  IType* old_ija     = reinterpret_cast<IType*>(s->ija);\n  DType* old_a       = reinterpret_cast<DType*>(s->a);\n\n  // Copy all values prior to the insertion site to the new IJA and new A\n  if (struct_only) {\n    for (size_t i = 0; i < pos; ++i) {\n      new_ija[i] = old_ija[i];\n    }\n  } else {\n    for (size_t i = 0; i < pos; ++i) {\n      new_ija[i] = old_ija[i];\n      new_a[i]   = old_a[i];\n    }\n  }\n\n\n  // Copy all values subsequent to the insertion site to the new IJA and new A, leaving room (size n) for insertion.\n  if (struct_only) {\n    for (size_t i = pos; i < current_size; ++i) {\n      new_ija[i+n] = old_ija[i];\n    }\n  } else {\n    for (size_t i = pos; i < current_size; ++i) {\n      new_ija[i+n] = old_ija[i];\n      new_a[i+n] = old_a[i];\n    }\n  }\n\n  s->capacity = new_capacity;\n  if (s->dtype == nm::RUBYOBJ)\n    nm_yale_storage_register_a(new_a, new_capacity);\n\n  NM_FREE(s->ija);\n  nm_yale_storage_unregister(s);\n  NM_FREE(s->a);\n\n  if (s->dtype == nm::RUBYOBJ)\n    nm_yale_storage_unregister_a(new_a, new_capacity);\n\n  s->ija = new_ija;\n  s->a   = reinterpret_cast<void*>(new_a);\n\n  
return 'i';\n}\n\n/*\n * Insert a value or contiguous values in the ija and a vectors (after ja and\n * diag). Does not free anything; you are responsible!\n *\n * TODO: Improve this so it can handle non-contiguous element insertions\n *  efficiently. For now, we can just sort the elements in the row in\n *  question.)\n */\ntemplate <typename DType>\nstatic char vector_insert(YALE_STORAGE* s, size_t pos, size_t* j, void* val_, size_t n, bool struct_only) {\n\n  if (pos < s->shape[0]) {\n    rb_raise(rb_eArgError, \"vector insert pos (%lu) is before beginning of ja (%lu); this should not happen\", pos, s->shape[0]);\n  }\n\n  DType* val = reinterpret_cast<DType*>(val_);\n\n  size_t size = s->ija[s->shape[0]];\n\n  IType* ija = s->ija;\n  DType* a   = reinterpret_cast<DType*>(s->a);\n\n  if (size + n > s->capacity) {\n    vector_insert_resize<DType>(s, size, pos, j, n, struct_only);\n\n    // Need to get the new locations for ija and a.\n    ija = s->ija;\n    a   = reinterpret_cast<DType*>(s->a);\n  } else {\n    /*\n     * No resize required:\n     * easy (but somewhat slow), just copy elements to the tail, starting at\n     * the end, one element at a time.\n     *\n     * TODO: This can be made slightly more efficient, but only after the tests\n     *  are written.\n     */\n\n    if (struct_only) {\n      for (size_t i = 0; i < size - pos; ++i) {\n        ija[size+n-1-i] = ija[size-1-i];\n      }\n    } else {\n      for (size_t i = 0; i < size - pos; ++i) {\n        ija[size+n-1-i] = ija[size-1-i];\n        a[size+n-1-i]   = a[size-1-i];\n      }\n    }\n  }\n\n  // Now insert the new values.\n  if (struct_only) {\n    for (size_t i = 0; i < n; ++i) {\n      ija[pos+i]  = j[i];\n    }\n  } else {\n    for (size_t i = 0; i < n; ++i) {\n      ija[pos+i]  = j[i];\n      a[pos+i]    = val[i];\n    }\n  }\n\n  return 'i';\n}\n\n/*\n * If we add n items to row i, we need to increment ija[i+1] and onward.\n */\nstatic void increment_ia_after(YALE_STORAGE* s, IType 
ija_size, IType i, long n) {\n  IType* ija = s->ija;\n\n  ++i;\n  for (; i <= ija_size; ++i) {\n    ija[i] += n;\n  }\n}\n\n/*\n * Binary search for returning insertion points.\n */\nstatic IType insert_search(YALE_STORAGE* s, IType left, IType right, IType key, bool& found) {\n\n  if (left > right) {\n    found = false;\n    return left;\n  }\n\n  IType* ija = s->ija;\n  IType mid = (left + right)/2;\n  IType mid_j = ija[mid];\n\n  if (mid_j == key) {\n    found = true;\n    return mid;\n\n  } else if (mid_j > key) {\n    return insert_search(s, left, mid-1, key, found);\n\n  } else {\n    return insert_search(s, mid+1, right, key, found);\n  }\n}\n\n/////////////////////////\n// Copying and Casting //\n/////////////////////////\n\n/*\n * Templated copy constructor for changing dtypes.\n */\ntemplate <typename L, typename R>\nYALE_STORAGE* cast_copy(const YALE_STORAGE* rhs) {\n  YaleStorage<R> y(rhs);\n  return y.template alloc_copy<L>();\n}\n\n/*\n * Template access for getting the size of Yale storage.\n */\nsize_t get_size(const YALE_STORAGE* storage) {\n  return storage->ija[ storage->shape[0] ];\n}\n\n\ntemplate <typename DType>\nstatic STORAGE* matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {\n  YALE_STORAGE *left  = (YALE_STORAGE*)(casted_storage.left),\n               *right = (YALE_STORAGE*)(casted_storage.right);\n\n  nm_yale_storage_register(left);\n  nm_yale_storage_register(right);\n  // We can safely get dtype from the casted matrices; post-condition of binary_storage_cast_alloc is that dtype is the\n  // same for left and right.\n  // int8_t dtype = left->dtype;\n\n  IType* ijl = left->ija;\n  IType* ijr = right->ija;\n\n  // First, count the ndnz of the result.\n  // TODO: This basically requires running symbmm twice to get the exact ndnz size. That's frustrating. 
Are there simple\n  // cases where we can avoid running it?\n  size_t result_ndnz = nm::math::symbmm(resulting_shape[0], left->shape[1], resulting_shape[1], ijl, ijl, true, ijr, ijr, true, NULL, true);\n\n  // Create result storage.\n  YALE_STORAGE* result = nm_yale_storage_create(left->dtype, resulting_shape, 2, result_ndnz);\n  init<DType>(result, NULL);\n  IType* ija = result->ija;\n\n  // Symbolic multiplication step (build the structure)\n  nm::math::symbmm(resulting_shape[0], left->shape[1], resulting_shape[1], ijl, ijl, true, ijr, ijr, true, ija, true);\n\n  // Numeric multiplication step (fill in the elements)\n\n  nm::math::numbmm<DType>(result->shape[0], left->shape[1], result->shape[1],\n                                ijl, ijl, reinterpret_cast<DType*>(left->a), true,\n                                ijr, ijr, reinterpret_cast<DType*>(right->a), true,\n                                ija, ija, reinterpret_cast<DType*>(result->a), true);\n\n\n  // Sort the columns\n  nm::math::smmp_sort_columns<DType>(result->shape[0], ija, ija, reinterpret_cast<DType*>(result->a));\n\n  nm_yale_storage_unregister(right);\n  nm_yale_storage_unregister(left);\n  return reinterpret_cast<STORAGE*>(result);\n}\n\n\n/*\n * Get the sum of offsets from the original matrix (for sliced iteration).\n */\nstatic std::array<size_t,2> get_offsets(YALE_STORAGE* x) {\n  std::array<size_t, 2> offsets{ {0,0} };\n  while (x != x->src) {\n    offsets[0] += x->offset[0];\n    offsets[1] += x->offset[1];\n    x = reinterpret_cast<YALE_STORAGE*>(x->src);\n  }\n  return offsets;\n}\n\n\nclass RowIterator {\nprotected:\n  YALE_STORAGE* s;\n  IType* ija;\n  void*  a;\n  IType i, k, k_end;\n  size_t j_offset, j_shape;\n  bool diag, End;\n  VALUE init;\npublic:\n  RowIterator(YALE_STORAGE* s_, IType* ija_, IType i_, size_t j_shape_, size_t j_offset_ = 0)\n    : s(s_),\n      ija(ija_),\n      a(reinterpret_cast<YALE_STORAGE*>(s->src)->a),\n      i(i_),\n      k(ija[i]),\n      k_end(ija[i+1]),\n   
   j_offset(j_offset_),\n      j_shape(j_shape_),\n      diag(row_has_no_nd() || diag_is_first()),\n      End(false),\n      init(default_value(s))\n    { }\n\n  RowIterator(YALE_STORAGE* s_, IType i_, size_t j_shape_, size_t j_offset_ = 0)\n    : s(s_),\n      ija(IJA(s)),\n      a(reinterpret_cast<YALE_STORAGE*>(s->src)->a),\n      i(i_),\n      k(ija[i]),\n      k_end(ija[i+1]),\n      j_offset(j_offset_),\n      j_shape(j_shape_),\n      diag(row_has_no_nd() || diag_is_first()),\n      End(false),\n      init(default_value(s))\n  { }\n\n  RowIterator(const RowIterator& rhs) : s(rhs.s), ija(rhs.ija), a(reinterpret_cast<YALE_STORAGE*>(s->src)->a), i(rhs.i), k(rhs.k), k_end(rhs.k_end), j_offset(rhs.j_offset), j_shape(rhs.j_shape), diag(rhs.diag), End(rhs.End), init(rhs.init) { }\n\n  VALUE obj() const {\n    return diag ? obj_at(s, i) : obj_at(s, k);\n  }\n\n  template <typename T>\n  T cobj() const {\n    if (typeid(T) == typeid(RubyObject)) return obj();\n    return A<T>(s)[diag ? i : k];\n  }\n\n  inline IType proper_j() const {\n    return diag ? 
i : ija[k];\n  }\n\n  inline IType offset_j() const {\n    return proper_j() - j_offset;\n  }\n\n  inline size_t capacity() const {\n    return reinterpret_cast<YALE_STORAGE*>(s->src)->capacity;\n  }\n\n  inline void vector_grow() {\n    YALE_STORAGE* src = reinterpret_cast<YALE_STORAGE*>(s->src);\n    nm::yale_storage::vector_grow(src);\n    ija = reinterpret_cast<IType*>(src->ija);\n    a   = src->a;\n  }\n\n  /* Returns true if an additional value is inserted, false if it goes on the diagonal */\n  bool insert(IType j, VALUE v) {\n    if (j == i) { // insert regardless on diagonal\n      reinterpret_cast<VALUE*>(a)[j] = v;\n      return false;\n\n    } else {\n      if (rb_funcall(v, rb_intern(\"!=\"), 1, init) == Qtrue) {\n        if (k >= capacity()) {\n          vector_grow();\n        }\n        reinterpret_cast<VALUE*>(a)[k] = v;\n        ija[k] = j;\n        k++;\n        return true;\n      }\n      return false;\n    }\n  }\n\n  void update_row_end() {\n    ija[i+1] = k;\n    k_end    = k;\n  }\n\n  /* Past the j_shape? */\n  inline bool end() const {\n    if (End)  return true;\n    //if (diag) return i - j_offset >= j_shape;\n    //else return k >= s->capacity || ija[k] - j_offset >= j_shape;\n    return (int)(diag ? 
i : ija[k]) - (int)(j_offset) >= (int)(j_shape);\n  }\n\n  inline bool row_has_no_nd() const { return ija[i] == k_end; /* k_start == k_end */  }\n  inline bool diag_is_first() const { return i < ija[ija[i]];  }\n  inline bool diag_is_last() const  { return i > ija[k_end-1]; } // only works if !row_has_no_nd()\n  inline bool k_is_last_nd() const  { return k == k_end-1;     }\n  inline bool k_is_last() const     { return k_is_last_nd() && !diag_is_last(); }\n  inline bool diag_is_ahead() const { return i > ija[k]; }\n  inline bool row_has_diag() const  { return i < s->shape[1];  }\n  inline bool diag_is_next() const  { // assumes we've already tested for diag, row_has_no_nd(), diag_is_first()\n    if (i == ija[k]+1) return true; // definite next\n    else if (k+1 < k_end && i >= ija[k+1]+1) return false; // at least one item before it\n    else return true;\n  }\n\n  RowIterator& operator++() {\n    if (diag) {                                             // we're at the diagonal\n      if (row_has_no_nd() || diag_is_last()) End = true;    //  and there are no non-diagonals (or none still to visit)\n      diag = false;\n    } else if (!row_has_diag()) {                           // row has no diagonal entries\n      if (row_has_no_nd() || k_is_last_nd()) End = true;    // row is totally empty, or we're at last entry\n      else k++;                                             // still entries to visit\n    } else { // not at diag but it exists somewhere in the row, and row has at least one nd entry\n      if (diag_is_ahead()) { // diag is ahead\n        if (k_is_last_nd()) diag = true; // diag is next and last\n        else if (diag_is_next()) {       // diag is next and not last\n          diag = true;\n          k++;\n        } else k++;                      // diag is not next\n      } else {                           // diag is past\n        if (k_is_last_nd()) End = true;  //   and we're at the end\n        else k++;                        //   and we're not at 
the end\n      }\n    }\n\n    return *this;\n  }\n\n\n  RowIterator operator++(int unused) {\n    RowIterator x(*this);\n    ++(*this);\n    return x;\n  }\n};\n\n\n// Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the length of\n// the matrix's storage.\nstatic VALUE nm_yale_stored_enumerator_length(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  YALE_STORAGE* s   = NM_STORAGE_YALE(nmatrix);\n  YALE_STORAGE* src = s->src == s ? s : reinterpret_cast<YALE_STORAGE*>(s->src);\n  size_t ia_size    = src->shape[0];\n  // FIXME: This needs to be corrected for slicing.\n  size_t len = std::min( s->shape[0] + s->offset[0], s->shape[1] + s->offset[1] ) + nm_yale_storage_get_size(src) -  ia_size;\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return INT2FIX(len);\n}\n\n\n// Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the length of\n// the matrix's storage.\nstatic VALUE nm_yale_stored_nondiagonal_enumerator_length(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);\n  if (s->src != s) s = reinterpret_cast<YALE_STORAGE*>(s->src);  // need to get the original storage shape\n\n  size_t ia_size = s->shape[0];\n  size_t len     = nm_yale_storage_get_size(NM_STORAGE_YALE(nmatrix)) - ia_size;\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return INT2FIX(len);\n}\n\n// Helper function for diagonal length.\nstatic VALUE nm_yale_stored_diagonal_enumerator_length(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);\n  size_t len = std::min( s->shape[0] + s->offset[0], s->shape[1] + s->offset[1] );\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return INT2FIX(len);\n}\n\n\n// Helper function for full enumerator length.\nstatic VALUE nm_yale_enumerator_length(VALUE nmatrix) {\n  NM_CONSERVATIVE(nm_register_value(&nmatrix));\n  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);\n  
size_t len = s->shape[0] * s->shape[1];\n  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));\n  return INT2FIX(len);\n}\n\n\n/*\n * Map the stored values of a matrix in storage order.\n */\ntemplate <typename D>\nstatic VALUE map_stored(VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n  YALE_STORAGE* s = NM_STORAGE_YALE(self);\n  YaleStorage<D> y(s);\n\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_yale_stored_enumerator_length);\n\n  YALE_STORAGE* r = y.template alloc_copy<nm::RubyObject, true>();\n  nm_yale_storage_register(r);\n  NMATRIX* m      = nm_create(nm::YALE_STORE, reinterpret_cast<STORAGE*>(r));\n  VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);\n  nm_yale_storage_unregister(r);\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  return to_return;\n}\n\n\n/*\n * map_stored which visits the stored entries of two matrices in order.\n */\ntemplate <typename LD, typename RD>\nstatic VALUE map_merged_stored(VALUE left, VALUE right, VALUE init) {\n  nm::YaleStorage<LD> l(NM_STORAGE_YALE(left));\n  nm::YaleStorage<RD> r(NM_STORAGE_YALE(right));\n  VALUE to_return = l.map_merged_stored(CLASS_OF(left), r, init);\n  return to_return;\n}\n\n\n/*\n * Iterate over the stored entries in Yale (diagonal and non-diagonal non-zeros)\n */\ntemplate <typename DType>\nstatic VALUE each_stored_with_indices(VALUE nm) {\n  NM_CONSERVATIVE(nm_register_value(&nm));\n  YALE_STORAGE* s = NM_STORAGE_YALE(nm);\n  YaleStorage<DType> y(s);\n\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_enumerator_length);\n\n  for (typename YaleStorage<DType>::const_stored_diagonal_iterator d = y.csdbegin(); d != y.csdend(); ++d) {\n    rb_yield_values(3, ~d, d.rb_i(), d.rb_j());\n  }\n\n  for (typename YaleStorage<DType>::const_row_iterator it = 
y.cribegin(); it != y.criend(); ++it) {\n    for (auto jt = it.ndbegin(); jt != it.ndend(); ++jt) {\n      rb_yield_values(3, ~jt, it.rb_i(), jt.rb_j());\n    }\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n\n  return nm;\n}\n\n\n/*\n * Iterate over the stored diagonal entries in Yale.\n */\ntemplate <typename DType>\nstatic VALUE stored_diagonal_each_with_indices(VALUE nm) {\n  NM_CONSERVATIVE(nm_register_value(&nm));\n\n  YALE_STORAGE* s = NM_STORAGE_YALE(nm);\n  YaleStorage<DType> y(s);\n\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_diagonal_length); // FIXME: need diagonal length\n\n  for (typename YaleStorage<DType>::const_stored_diagonal_iterator d = y.csdbegin(); d != y.csdend(); ++d) {\n    rb_yield_values(3, ~d, d.rb_i(), d.rb_j());\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n\n  return nm;\n}\n\n\n/*\n * Iterate over the stored diagonal entries in Yale.\n */\ntemplate <typename DType>\nstatic VALUE stored_nondiagonal_each_with_indices(VALUE nm) {\n  NM_CONSERVATIVE(nm_register_value(&nm));\n\n  YALE_STORAGE* s = NM_STORAGE_YALE(nm);\n  YaleStorage<DType> y(s);\n\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n  RETURN_SIZED_ENUMERATOR(nm, 0, 0, 0); // FIXME: need diagonal length\n\n  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {\n    for (auto jt = it.ndbegin(); jt != it.ndend(); ++jt) {\n      rb_yield_values(3, ~jt, it.rb_i(), jt.rb_j());\n    }\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n\n  return nm;\n}\n\n\n/*\n * Iterate over the stored entries in Yale in order of i,j. 
Visits every diagonal entry, even if it's the default.\n */\ntemplate <typename DType>\nstatic VALUE each_ordered_stored_with_indices(VALUE nm) {\n  NM_CONSERVATIVE(nm_register_value(&nm));\n\n  YALE_STORAGE* s = NM_STORAGE_YALE(nm);\n  YaleStorage<DType> y(s);\n\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_enumerator_length);\n\n  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {\n    for (auto jt = it.begin(); jt != it.end(); ++jt) {\n      rb_yield_values(3, ~jt, it.rb_i(), jt.rb_j());\n    }\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n\n  return nm;\n}\n\n\ntemplate <typename DType>\nstatic VALUE each_with_indices(VALUE nm) {\n  NM_CONSERVATIVE(nm_register_value(&nm));\n\n  YALE_STORAGE* s = NM_STORAGE_YALE(nm);\n  YaleStorage<DType> y(s);\n\n  // If we don't have a block, return an enumerator.\n  RETURN_SIZED_ENUMERATOR_PRE\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_enumerator_length);\n\n  for (typename YaleStorage<DType>::const_iterator iter = y.cbegin(); iter != y.cend(); ++iter) {\n    rb_yield_values(3, ~iter, iter.rb_i(), iter.rb_j());\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&nm));\n\n  return nm;\n}\n\ntemplate <typename D>\nstatic bool is_pos_default_value(YALE_STORAGE* s, size_t apos) {\n  YaleStorage<D> y(s);\n  return y.is_pos_default_value(apos);\n}\n\n} // end of namespace nm::yale_storage\n\n} // end of namespace nm.\n\n///////////////////\n// Ruby Bindings //\n///////////////////\n\n/* These bindings are mostly only for debugging Yale. They are called from Init_nmatrix. */\n\nextern \"C\" {\n\nvoid nm_init_yale_functions() {\n  /*\n   * This module stores methods that are useful for debugging Yale matrices,\n   * i.e. 
the ones with +:yale+ stype.\n   */\n  cNMatrix_YaleFunctions = rb_define_module_under(cNMatrix, \"YaleFunctions\");\n\n  // Expert recommendation. Eventually this should go in a separate gem, or at least a separate module.\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_row_keys_intersection\", (METHOD)nm_row_keys_intersection, 3);\n\n  // Debugging functions.\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_ija\", (METHOD)nm_ija, -1);\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_a\", (METHOD)nm_a, -1);\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_size\", (METHOD)nm_size, 0);\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_ia\", (METHOD)nm_ia, 0);\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_ja\", (METHOD)nm_ja, 0);\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_d\", (METHOD)nm_d, -1);\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_lu\", (METHOD)nm_lu, 0);\n\n  rb_define_method(cNMatrix_YaleFunctions, \"yale_nd_row\", (METHOD)nm_nd_row, -1);\n\n  /* Document-const:\n   * Defines the growth rate of the sparse NMatrix's size. 
Default is 1.5.\n   */\n  rb_define_const(cNMatrix_YaleFunctions, \"YALE_GROWTH_CONSTANT\", rb_float_new(nm::yale_storage::GROWTH_CONSTANT));\n\n  // This is so the user can easily check the IType size, mostly for debugging.\n  size_t itype_size = sizeof(IType);\n  VALUE itype_dtype;\n  if (itype_size == sizeof(uint64_t)) {\n    itype_dtype = ID2SYM(rb_intern(\"int64\"));\n  } else if (itype_size == sizeof(uint32_t)) {\n    itype_dtype = ID2SYM(rb_intern(\"int32\"));\n  } else if (itype_size == sizeof(uint16_t)) {\n    itype_dtype = ID2SYM(rb_intern(\"int16\"));\n  } else {\n    rb_raise(rb_eStandardError, \"unhandled length for sizeof(IType): %lu; note that IType is probably defined as size_t\", sizeof(IType));\n  }\n  rb_define_const(cNMatrix, \"INDEX_DTYPE\", itype_dtype);\n}\n\n/////////////////\n// C ACCESSORS //\n/////////////////\n\n/* C interface for NMatrix#each_with_indices (Yale) */\nVALUE nm_yale_each_with_indices(VALUE nmatrix) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::each_with_indices, VALUE, VALUE)\n\n  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);\n}\n\n\n/* C interface for NMatrix#each_stored_with_indices (Yale) */\nVALUE nm_yale_each_stored_with_indices(VALUE nmatrix) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::each_stored_with_indices, VALUE, VALUE)\n\n  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);\n}\n\n\n/* Iterate along stored diagonal (not actual diagonal!) */\nVALUE nm_yale_stored_diagonal_each_with_indices(VALUE nmatrix) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::stored_diagonal_each_with_indices, VALUE, VALUE)\n\n  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);\n}\n\n/* Iterate through stored nondiagonal (not actual diagonal!) 
*/\nVALUE nm_yale_stored_nondiagonal_each_with_indices(VALUE nmatrix) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::stored_nondiagonal_each_with_indices, VALUE, VALUE)\n\n  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);\n}\n\n\n/* C interface for NMatrix#each_ordered_stored_with_indices (Yale) */\nVALUE nm_yale_each_ordered_stored_with_indices(VALUE nmatrix) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::each_ordered_stored_with_indices, VALUE, VALUE)\n\n  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);\n}\n\n\n\n/*\n * C accessor for inserting some value in a matrix (or replacing an existing cell).\n */\nvoid nm_yale_storage_set(VALUE left, SLICE* slice, VALUE right) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::set, void, VALUE left, SLICE* slice, VALUE right);\n\n  ttable[NM_DTYPE(left)](left, slice, right);\n}\n\n\n/*\n * Determine the number of non-diagonal non-zeros in a not-yet-created copy of a slice or matrix.\n */\nstatic size_t yale_count_slice_copy_ndnz(const YALE_STORAGE* s, size_t* offset, size_t* shape) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::count_slice_copy_ndnz, size_t, const YALE_STORAGE*, size_t*, size_t*)\n\n  return ttable[s->dtype](s, offset, shape);\n}\n\n\n/*\n * C accessor for yale_storage::get, which returns a slice of YALE_STORAGE object by copy\n *\n * Slicing-related.\n */\nvoid* nm_yale_storage_get(const STORAGE* storage, SLICE* slice) {\n  YALE_STORAGE* casted_storage = (YALE_STORAGE*)storage;\n\n  if (slice->single) {\n    NAMED_DTYPE_TEMPLATE_TABLE(elem_copy_table,  nm::yale_storage::get_single, void*, YALE_STORAGE*, SLICE*)\n\n    return elem_copy_table[casted_storage->dtype](casted_storage, slice);\n  } else {\n    nm_yale_storage_register(casted_storage);\n    //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).alloc_ref(slice));\n    NAMED_DTYPE_TEMPLATE_TABLE(ref_table, nm::yale_storage::ref, YALE_STORAGE*, YALE_STORAGE* 
storage, SLICE* slice)\n\n    YALE_STORAGE* ref = ref_table[casted_storage->dtype](casted_storage, slice);\n\n    NAMED_LR_DTYPE_TEMPLATE_TABLE(slice_copy_table, nm::yale_storage::slice_copy, YALE_STORAGE*, YALE_STORAGE*)\n\n    YALE_STORAGE* ns = slice_copy_table[casted_storage->dtype][casted_storage->dtype](ref);\n\n    NM_FREE(ref);\n\n    nm_yale_storage_unregister(casted_storage);\n\n    return ns;\n  }\n}\n\n/*\n * C accessor for yale_storage::vector_insert\n */\nstatic char nm_yale_storage_vector_insert(YALE_STORAGE* s, size_t pos, size_t* js, void* vals, size_t n, bool struct_only, nm::dtype_t dtype) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::vector_insert, char, YALE_STORAGE*, size_t, size_t*, void*, size_t, bool);\n\n  return ttable[dtype](s, pos, js, vals, n, struct_only);\n}\n\n/*\n * C accessor for yale_storage::increment_ia_after, typically called after ::vector_insert\n */\nstatic void nm_yale_storage_increment_ia_after(YALE_STORAGE* s, size_t ija_size, size_t i, long n) {\n  nm::yale_storage::increment_ia_after(s, ija_size, i, n);\n}\n\n\n/*\n * C accessor for yale_storage::ref, which returns either a pointer to the correct location in a YALE_STORAGE object\n * for some set of coordinates, or a pointer to a single element.\n */\nvoid* nm_yale_storage_ref(const STORAGE* storage, SLICE* slice) {\n  YALE_STORAGE* casted_storage = (YALE_STORAGE*)storage;\n\n  if (slice->single) {\n    //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).get_single_p(slice));\n    NAMED_DTYPE_TEMPLATE_TABLE(elem_copy_table,  nm::yale_storage::get_single, void*, YALE_STORAGE*, SLICE*)\n    return elem_copy_table[casted_storage->dtype](casted_storage, slice);\n  } else {\n    //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).alloc_ref(slice));\n    NAMED_DTYPE_TEMPLATE_TABLE(ref_table, nm::yale_storage::ref, YALE_STORAGE*, YALE_STORAGE* storage, SLICE* 
slice)\n    return reinterpret_cast<void*>(ref_table[casted_storage->dtype](casted_storage, slice));\n\n  }\n}\n\n\n/*\n * C accessor for determining whether two YALE_STORAGE objects have the same contents.\n */\nbool nm_yale_storage_eqeq(const STORAGE* left, const STORAGE* right) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::eqeq, bool, const YALE_STORAGE* left, const YALE_STORAGE* right);\n\n  const YALE_STORAGE* casted_left = reinterpret_cast<const YALE_STORAGE*>(left);\n\n  return ttable[casted_left->dtype][right->dtype](casted_left, (const YALE_STORAGE*)right);\n}\n\n\n/*\n * Copy constructor for changing dtypes. (C accessor)\n */\nSTORAGE* nm_yale_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void* dummy) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::cast_copy, YALE_STORAGE*, const YALE_STORAGE* rhs);\n\n  const YALE_STORAGE* casted_rhs = reinterpret_cast<const YALE_STORAGE*>(rhs);\n  //return reinterpret_cast<STORAGE*>(nm::YaleStorage<nm::dtype_enum_T< rhs->dtype >::type>(rhs).alloc_copy<nm::dtype_enum_T< new_dtype >::type>());\n  return (STORAGE*)ttable[new_dtype][casted_rhs->dtype](casted_rhs);\n}\n\n\n/*\n * Returns size of Yale storage as a size_t (no matter what the itype is). 
(C accessor)\n */\nsize_t nm_yale_storage_get_size(const YALE_STORAGE* storage) {\n  return nm::yale_storage::get_size(storage);\n}\n\n\n\n/*\n * Return a pointer to the matrix's default value entry.\n */\nstatic void* default_value_ptr(const YALE_STORAGE* s) {\n  return reinterpret_cast<void*>(reinterpret_cast<char*>(((YALE_STORAGE*)(s->src))->a) + (((YALE_STORAGE*)(s->src))->shape[0] * DTYPE_SIZES[s->dtype]));\n}\n\n/*\n * Return the Ruby object at a given location in storage.\n */\nstatic VALUE obj_at(YALE_STORAGE* s, size_t k) {\n  if (s->dtype == nm::RUBYOBJ)  return reinterpret_cast<VALUE*>(((YALE_STORAGE*)(s->src))->a)[k];\n  else  return nm::rubyobj_from_cval(reinterpret_cast<void*>(reinterpret_cast<char*>(((YALE_STORAGE*)(s->src))->a) + k * DTYPE_SIZES[s->dtype]), s->dtype).rval;\n}\n\n\n/*\n * Return the matrix's default value as a Ruby VALUE.\n */\nstatic VALUE default_value(const YALE_STORAGE* s) {\n  if (s->dtype == nm::RUBYOBJ) return *reinterpret_cast<VALUE*>(default_value_ptr(s));\n  else return nm::rubyobj_from_cval(default_value_ptr(s), s->dtype).rval;\n}\n\n\n/*\n * Check to see if a default value is some form of zero. Easy for non-Ruby object matrices, which should always be 0.\n */\nstatic bool default_value_is_numeric_zero(const YALE_STORAGE* s) {\n  return rb_funcall(default_value(s), rb_intern(\"==\"), 1, INT2FIX(0)) == Qtrue;\n}\n\n\n\n/*\n * Transposing copy constructor.\n */\nSTORAGE* nm_yale_storage_copy_transposed(const STORAGE* rhs_base) {\n  YALE_STORAGE* rhs = (YALE_STORAGE*)rhs_base;\n  NAMED_DTYPE_TEMPLATE_TABLE(transp, nm::yale_storage::copy_transposed, YALE_STORAGE*, YALE_STORAGE*)\n  return (STORAGE*)(transp[rhs->dtype](rhs));\n}\n\n/*\n * C accessor for multiplying two YALE_STORAGE matrices, which have already been casted to the same dtype.\n *\n * FIXME: There should be some mathematical way to determine the worst-case IType based on the input ITypes. 
Right now\n * it just uses the default.\n */\nSTORAGE* nm_yale_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {\n  DTYPE_TEMPLATE_TABLE(nm::yale_storage::matrix_multiply, STORAGE*, const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);\n\n  YALE_STORAGE* left = reinterpret_cast<YALE_STORAGE*>(casted_storage.left);\n  YALE_STORAGE* right = reinterpret_cast<YALE_STORAGE*>(casted_storage.right);\n\n  if (!default_value_is_numeric_zero(left) || !default_value_is_numeric_zero(right)) {\n    rb_raise(rb_eNotImpError, \"matrix default value must be some form of zero (not false or nil) for multiplication\");\n    return NULL;\n  }\n\n  return ttable[left->dtype](casted_storage, resulting_shape, vector);\n}\n\n\n///////////////\n// Lifecycle //\n///////////////\n\n/*\n * C accessor function for creating a YALE_STORAGE object. Prior to calling this function, you MUST\n * allocate shape (should be size_t * 2) -- don't use use a regular size_t array!\n *\n * For this type, dim must always be 2. 
The final argument is the initial capacity with which to\n * create the storage.\n */\n\nYALE_STORAGE* nm_yale_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, size_t init_capacity) {\n  if (dim != 2) {\n    rb_raise(nm_eStorageTypeError, \"yale supports only 2-dimensional matrices\");\n  }\n  DTYPE_OBJECT_STATIC_TABLE(nm::YaleStorage, create, YALE_STORAGE*, size_t* shape, size_t init_capacity)\n  return ttable[dtype](shape, init_capacity);\n}\n\n/*\n * Destructor for yale storage (C-accessible).\n */\nvoid nm_yale_storage_delete(STORAGE* s) {\n  if (s) {\n    YALE_STORAGE* storage = (YALE_STORAGE*)s;\n    if (storage->count-- == 1) {\n      NM_FREE(storage->shape);\n      NM_FREE(storage->offset);\n      NM_FREE(storage->ija);\n      NM_FREE(storage->a);\n      NM_FREE(storage);\n    }\n  }\n}\n\n/*\n * Destructor for the yale storage ref\n */\nvoid nm_yale_storage_delete_ref(STORAGE* s) {\n  if (s) {\n    YALE_STORAGE* storage = (YALE_STORAGE*)s;\n    nm_yale_storage_delete( reinterpret_cast<STORAGE*>(storage->src) );\n    NM_FREE(storage->shape);\n    NM_FREE(storage->offset);\n    NM_FREE(s);\n  }\n}\n\n/*\n * C accessor for yale_storage::init, a templated function.\n *\n * Initializes the IJA vector of the YALE_STORAGE matrix.\n */\nvoid nm_yale_storage_init(YALE_STORAGE* s, void* init_val) {\n  DTYPE_TEMPLATE_TABLE(nm::yale_storage::init, void, YALE_STORAGE*, void*);\n\n  ttable[s->dtype](s, init_val);\n}\n\n\n/*\n * Ruby GC mark function for YALE_STORAGE. 
C accessible.\n */\nvoid nm_yale_storage_mark(STORAGE* storage_base) {\n  YALE_STORAGE* storage = (YALE_STORAGE*)storage_base;\n\n  if (storage && storage->dtype == nm::RUBYOBJ) {\n\n    VALUE* a = (VALUE*)(storage->a);\n    rb_gc_mark_locations(a, &(a[storage->capacity-1]));\n  }\n}\n\nvoid nm_yale_storage_register_a(void* a, size_t size) {\n  nm_register_values(reinterpret_cast<VALUE*>(a), size);\n}\n\nvoid nm_yale_storage_unregister_a(void* a, size_t size) {\n  nm_unregister_values(reinterpret_cast<VALUE*>(a), size);\n}\n\nvoid nm_yale_storage_register(const STORAGE* s) {\n  const YALE_STORAGE* y = reinterpret_cast<const YALE_STORAGE*>(s);\n  if (y->dtype == nm::RUBYOBJ) {\n    nm_register_values(reinterpret_cast<VALUE*>(y->a), nm::yale_storage::get_size(y));\n  }\n}\n\nvoid nm_yale_storage_unregister(const STORAGE* s) {\n  const YALE_STORAGE* y = reinterpret_cast<const YALE_STORAGE*>(s);\n  if (y->dtype == nm::RUBYOBJ) {\n    nm_unregister_values(reinterpret_cast<VALUE*>(y->a), nm::yale_storage::get_size(y));\n  }\n}\n\n/*\n * Allocates and initializes the basic struct (but not the IJA or A vectors).\n *\n * This function is ONLY used when creating from old yale.\n */\nstatic YALE_STORAGE* alloc(nm::dtype_t dtype, size_t* shape, size_t dim) {\n  YALE_STORAGE* s;\n\n  s = NM_ALLOC( YALE_STORAGE );\n\n  s->ndnz        = 0;\n  s->dtype       = dtype;\n  s->shape       = shape;\n  s->offset      = NM_ALLOC_N(size_t, dim);\n  for (size_t i = 0; i < dim; ++i)\n    s->offset[i] = 0;\n  s->dim         = dim;\n  s->src         = reinterpret_cast<STORAGE*>(s);\n  s->count       = 1;\n\n  return s;\n}\n\nYALE_STORAGE* nm_yale_storage_create_from_old_yale(nm::dtype_t dtype, size_t* shape, char* ia, char* ja, char* a, nm::dtype_t from_dtype) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::create_from_old_yale, YALE_STORAGE*, nm::dtype_t dtype, size_t* shape, char* r_ia, char* r_ja, char* r_a);\n\n  return ttable[dtype][from_dtype](dtype, shape, ia, ja, 
a);\n\n}\n\n//////////////////////////////////////////////\n// YALE-SPECIFIC FUNCTIONS (RUBY ACCESSORS) //\n//////////////////////////////////////////////\n\n/*\n * call-seq:\n *     yale_size -> Integer\n *\n * Get the size of a Yale matrix (the number of elements actually stored).\n *\n * For capacity (the maximum number of elements that can be stored without a resize), use capacity instead.\n */\nstatic VALUE nm_size(VALUE self) {\n  YALE_STORAGE* s = (YALE_STORAGE*)(NM_SRC(self));\n  VALUE to_return = INT2FIX(nm::yale_storage::IJA(s)[s->shape[0]]);\n  return to_return;\n}\n\n\n/*\n * Determine if some pos in the diagonal is the default. No bounds checking!\n */\nstatic bool is_pos_default_value(YALE_STORAGE* s, size_t apos) {\n  DTYPE_TEMPLATE_TABLE(nm::yale_storage::is_pos_default_value, bool, YALE_STORAGE*, size_t)\n  return ttable[s->dtype](s, apos);\n}\n\n\n/*\n * call-seq:\n *     yale_row_keys_intersection(i, m2, i2) -> Array\n *\n * This function is experimental.\n *\n * It finds the intersection of row i of the current matrix with row i2 of matrix m2.\n * Both matrices must be Yale. 
They may not be slices.\n *\n * Only checks the stored indices; does not care about matrix default value.\n */\nstatic VALUE nm_row_keys_intersection(VALUE m1, VALUE ii1, VALUE m2, VALUE ii2) {\n\n  NM_CONSERVATIVE(nm_register_value(&m1));\n  NM_CONSERVATIVE(nm_register_value(&m2));\n\n  if (NM_SRC(m1) != NM_STORAGE(m1) || NM_SRC(m2) != NM_STORAGE(m2)) {\n    NM_CONSERVATIVE(nm_unregister_value(&m2));\n    NM_CONSERVATIVE(nm_unregister_value(&m1));\n    rb_raise(rb_eNotImpError, \"must be called on a real matrix and not a slice\");\n  }\n\n  size_t i1 = FIX2INT(ii1),\n         i2 = FIX2INT(ii2);\n\n  YALE_STORAGE *s   = NM_STORAGE_YALE(m1),\n               *t   = NM_STORAGE_YALE(m2);\n\n  size_t pos1 = s->ija[i1],\n         pos2 = t->ija[i2];\n\n  size_t nextpos1 = s->ija[i1+1],\n         nextpos2 = t->ija[i2+1];\n\n  size_t diff1 = nextpos1 - pos1,\n         diff2 = nextpos2 - pos2;\n\n  // Does the diagonal have a nonzero in it?\n  bool diag1 = i1 < s->shape[0] && !is_pos_default_value(s, i1),\n       diag2 = i2 < t->shape[0] && !is_pos_default_value(t, i2);\n\n  // Reserve max(diff1,diff2) space -- that's the max intersection possible.\n  VALUE ret = rb_ary_new2(std::max(diff1,diff2)+1);\n  nm_register_value(&ret);\n\n  // Handle once the special case where both have the diagonal in exactly\n  // the same place.\n  if (diag1 && diag2 && i1 == i2) {\n    rb_ary_push(ret, INT2FIX(i1));\n    diag1 = false; diag2 = false; // no need to deal with diagonals anymore.\n  }\n\n  // Now find the intersection.\n  size_t idx1 = pos1, idx2 = pos2;\n  while (idx1 < nextpos1 && idx2 < nextpos2) {\n    if (s->ija[idx1] == t->ija[idx2]) {\n      rb_ary_push(ret, INT2FIX(s->ija[idx1]));\n      ++idx1; ++idx2;\n    } else if (diag1 && i1 == t->ija[idx2]) {\n      rb_ary_push(ret, INT2FIX(i1));\n      diag1 = false;\n      ++idx2;\n    } else if (diag2 && i2 == s->ija[idx1]) {\n      rb_ary_push(ret, INT2FIX(i2));\n      diag2 = false;\n      ++idx1;\n    } else if (s->ija[idx1] < 
t->ija[idx2]) {\n      ++idx1;\n    } else { // s->ija[idx1] > t->ija[idx2]\n      ++idx2;\n    }\n  }\n\n  // Past the end of row i2's stored entries; need to try to find diagonal\n  if (diag2 && idx1 < nextpos1) {\n    idx1 = nm::yale_storage::binary_search_left_boundary(s, idx1, nextpos1, i2);\n    if (s->ija[idx1] == i2) rb_ary_push(ret, INT2FIX(i2));\n  }\n\n  // Find the diagonal, if possible, in the other one.\n  if (diag1 && idx2 < nextpos2) {\n    idx2 = nm::yale_storage::binary_search_left_boundary(t, idx2, nextpos2, i1);\n    if (t->ija[idx2] == i1) rb_ary_push(ret, INT2FIX(i1));\n  }\n\n  nm_unregister_value(&ret);\n  NM_CONSERVATIVE(nm_unregister_value(&m1));\n  NM_CONSERVATIVE(nm_unregister_value(&m2));\n\n  return ret;\n}\n\n\n/*\n * call-seq:\n *     yale_a -> Array\n *     yale_d(index) -> ...\n *\n * Get the A array of a Yale matrix (which stores the diagonal and the LU portions of the matrix).\n */\nstatic VALUE nm_a(int argc, VALUE* argv, VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  VALUE idx;\n  rb_scan_args(argc, argv, \"01\", &idx);\n  NM_CONSERVATIVE(nm_register_value(&idx));\n\n  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));\n  size_t size = nm_yale_storage_get_size(s);\n\n  if (idx == Qnil) {\n\n    VALUE* vals = NM_ALLOCA_N(VALUE, size);\n\n    nm_register_values(vals, size);\n\n    if (NM_DTYPE(self) == nm::RUBYOBJ) {\n      for (size_t i = 0; i < size; ++i) {\n        vals[i] = reinterpret_cast<VALUE*>(s->a)[i];\n      }\n    } else {\n      for (size_t i = 0; i < size; ++i) {\n        vals[i] = nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*i, s->dtype).rval;\n      }\n    }\n    VALUE ary = rb_ary_new4(size, vals);\n\n    for (size_t i = size; i < s->capacity; ++i)\n      rb_ary_push(ary, Qnil);\n\n    nm_unregister_values(vals, size);\n    NM_CONSERVATIVE(nm_unregister_value(&idx));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    return ary;\n  } else {\n    size_t index = 
FIX2INT(idx);\n    NM_CONSERVATIVE(nm_unregister_value(&idx));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    if (index >= size) rb_raise(rb_eRangeError, \"out of range\");\n    return nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype] * index, s->dtype).rval;\n  }\n}\n\n\n/*\n * call-seq:\n *     yale_d -> Array\n *     yale_d(index) -> ...\n *\n * Get the diagonal (\"D\") portion of the A array of a Yale matrix.\n */\nstatic VALUE nm_d(int argc, VALUE* argv, VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n  VALUE idx;\n  rb_scan_args(argc, argv, \"01\", &idx);\n  NM_CONSERVATIVE(nm_register_value(&idx));\n\n  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));\n\n  if (idx == Qnil) {\n    VALUE* vals = NM_ALLOCA_N(VALUE, s->shape[0]);\n\n    nm_register_values(vals, s->shape[0]);\n\n    if (NM_DTYPE(self) == nm::RUBYOBJ) {\n      for (size_t i = 0; i < s->shape[0]; ++i) {\n        vals[i] = reinterpret_cast<VALUE*>(s->a)[i];\n      }\n    } else {\n      for (size_t i = 0; i < s->shape[0]; ++i) {\n        vals[i] = nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*i, s->dtype).rval;\n      }\n    }\n    nm_unregister_values(vals, s->shape[0]);\n    NM_CONSERVATIVE(nm_unregister_value(&idx));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n\n    return rb_ary_new4(s->shape[0], vals);\n  } else {\n    size_t index = FIX2INT(idx);\n    NM_CONSERVATIVE(nm_unregister_value(&idx));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    if (index >= s->shape[0]) rb_raise(rb_eRangeError, \"out of range\");\n    return nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype] * index, s->dtype).rval;\n  }\n}\n\n/*\n * call-seq:\n *     yale_lu -> Array\n *\n * Get the non-diagonal (\"LU\") portion of the A array of a Yale matrix.\n */\nstatic VALUE nm_lu(VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));\n\n  size_t size = 
nm_yale_storage_get_size(s);\n\n  VALUE* vals = NM_ALLOCA_N(VALUE, size - s->shape[0] - 1);\n\n  nm_register_values(vals, size - s->shape[0] - 1);\n\n  if (NM_DTYPE(self) == nm::RUBYOBJ) {\n    for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {\n      vals[i] = reinterpret_cast<VALUE*>(s->a)[s->shape[0] + 1 + i];\n    }\n  } else {\n    for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {\n      vals[i] = nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*(s->shape[0] + 1 + i), s->dtype).rval;\n    }\n  }\n\n  VALUE ary = rb_ary_new4(size - s->shape[0] - 1, vals);\n\n  for (size_t i = size; i < s->capacity; ++i)\n    rb_ary_push(ary, Qnil);\n\n  nm_unregister_values(vals, size - s->shape[0] - 1);\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n\n  return ary;\n}\n\n/*\n * call-seq:\n *     yale_ia -> Array\n *\n * Get the IA portion of the IJA array of a Yale matrix. This gives the start and end positions of rows in the\n * JA and LU portions of the IJA and A arrays, respectively.\n */\nstatic VALUE nm_ia(VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));\n\n  VALUE* vals = NM_ALLOCA_N(VALUE, s->shape[0] + 1);\n\n  for (size_t i = 0; i < s->shape[0] + 1; ++i) {\n    vals[i] = INT2FIX(s->ija[i]);\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n\n  return rb_ary_new4(s->shape[0]+1, vals);\n}\n\n/*\n * call-seq:\n *     yale_ja -> Array\n *\n * Get the JA portion of the IJA array of a Yale matrix. 
This gives the column indices for entries in corresponding\n * positions in the LU portion of the A array.\n */\nstatic VALUE nm_ja(VALUE self) {\n\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));\n\n  size_t size = nm_yale_storage_get_size(s);\n\n  VALUE* vals = NM_ALLOCA_N(VALUE, size - s->shape[0] - 1);\n\n  nm_register_values(vals, size - s->shape[0] - 1);\n\n  for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {\n    vals[i] = INT2FIX(s->ija[s->shape[0] + 1 + i]);\n  }\n\n  VALUE ary = rb_ary_new4(size - s->shape[0] - 1, vals);\n\n  for (size_t i = size; i < s->capacity; ++i)\n    rb_ary_push(ary, Qnil);\n\n  nm_unregister_values(vals, size - s->shape[0] - 1);\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n\n  return ary;\n}\n\n/*\n * call-seq:\n *     yale_ija -> Array\n *     yale_ija(index) -> ...\n *\n * Get the IJA array of a Yale matrix (or a component of the IJA array).\n */\nstatic VALUE nm_ija(int argc, VALUE* argv, VALUE self) {\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  VALUE idx;\n  rb_scan_args(argc, argv, \"01\", &idx);\n  NM_CONSERVATIVE(nm_register_value(&idx));\n\n  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));\n  size_t size = nm_yale_storage_get_size(s);\n\n  if (idx == Qnil) {\n\n    VALUE* vals = NM_ALLOCA_N(VALUE, size);\n\n    nm_register_values(vals, size);\n\n    for (size_t i = 0; i < size; ++i) {\n      vals[i] = INT2FIX(s->ija[i]);\n    }\n\n   VALUE ary = rb_ary_new4(size, vals);\n\n    for (size_t i = size; i < s->capacity; ++i)\n      rb_ary_push(ary, Qnil);\n\n    nm_unregister_values(vals, size);\n    NM_CONSERVATIVE(nm_unregister_value(&idx));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n\n    return ary;\n\n  } else {\n    size_t index = FIX2INT(idx);\n    if (index >= size) rb_raise(rb_eRangeError, \"out of range\");\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    NM_CONSERVATIVE(nm_unregister_value(&idx));\n    
return INT2FIX(s->ija[index]);\n  }\n}\n\n\n/*\n * call-seq:\n *     yale_nd_row -> ...\n *\n * This function gets the non-diagonal contents of a Yale matrix row.\n * The first argument should be the row index. The optional second argument may be :hash or :keys, but defaults\n * to :hash. If :keys is given, it will only return the Hash keys (the column indices).\n *\n * This function is meant to accomplish its purpose as efficiently as possible. It does not check for appropriate\n * range.\n */\nstatic VALUE nm_nd_row(int argc, VALUE* argv, VALUE self) {\n\n  NM_CONSERVATIVE(nm_register_value(&self));\n  if (NM_SRC(self) != NM_STORAGE(self)) {\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    rb_raise(rb_eNotImpError, \"must be called on a real matrix and not a slice\");\n  }\n\n  VALUE i_, as;\n  rb_scan_args(argc, argv, \"11\", &i_, &as);\n  NM_CONSERVATIVE(nm_register_value(&as));\n  NM_CONSERVATIVE(nm_register_value(&i_));\n\n  bool keys = false;\n  if (as != Qnil && rb_to_id(as) != nm_rb_hash) keys = true;\n\n  size_t i = FIX2INT(i_);\n\n  YALE_STORAGE* s   = NM_STORAGE_YALE(self);\n  //nm::dtype_t dtype = NM_DTYPE(self);\n\n  if (i >= s->shape[0]) {\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    NM_CONSERVATIVE(nm_unregister_value(&as));\n    NM_CONSERVATIVE(nm_unregister_value(&i_));\n    rb_raise(rb_eRangeError, \"out of range (%lu >= %lu)\", i, s->shape[0]);\n  }\n\n  size_t pos = s->ija[i];\n  size_t nextpos = s->ija[i+1];\n  size_t diff = nextpos - pos;\n\n  VALUE ret;\n  if (keys) {\n    ret = rb_ary_new3(diff);\n\n    for (size_t idx = pos; idx < nextpos; ++idx) {\n      rb_ary_store(ret, idx - pos, INT2FIX(s->ija[idx]));\n    }\n\n  } else {\n    ret = rb_hash_new();\n\n    for (size_t idx = pos; idx < nextpos; ++idx) {\n      rb_hash_aset(ret, INT2FIX(s->ija[idx]), nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*idx, s->dtype).rval);\n    }\n  }\n  NM_CONSERVATIVE(nm_unregister_value(&as));\n  
NM_CONSERVATIVE(nm_unregister_value(&i_));\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n  return ret;\n}\n\n/*\n * call-seq:\n *     yale_vector_set(i, column_index_array, cell_contents_array, pos) -> Fixnum\n *\n * Insert at position pos an array of non-diagonal elements with column indices given. Note that the column indices and values\n * must be storage-contiguous -- that is, you can't insert them around existing elements in some row, only amid some\n * elements in some row. You *can* insert them around a diagonal element, since this is stored separately. This function\n * may not be used for the insertion of diagonal elements in most cases, as these are already present in the data\n * structure and are typically modified by replacement rather than insertion.\n *\n * The last argument, pos, may be nil if you want to insert at the beginning of a row. Otherwise it needs to be provided.\n * Don't expect this function to know the difference. It really does very little checking, because its goal is to make\n * multiple contiguous insertion as quick as possible.\n *\n * You should also not attempt to insert values which are the default (0). These are not supposed to be stored, and may\n * lead to undefined behavior.\n *\n * Example:\n *    m.yale_vector_set(3, [0,3,4], [1,1,1], 15)\n *\n * The example above inserts the values 1, 1, and 1 in columns 0, 3, and 4, assumed to be located at position 15 (which\n * corresponds to row 3).\n *\n * Example:\n *    next = m.yale_vector_set(3, [0,3,4], [1,1,1])\n *\n * This example determines that i=3 is at position 15 automatically. 
The value returned, next, is the position where the\n * next value(s) should be inserted.\n */\nVALUE nm_vector_set(int argc, VALUE* argv, VALUE self) { //, VALUE i_, VALUE jv, VALUE vv, VALUE pos_) {\n\n  NM_CONSERVATIVE(nm_register_value(&self));\n\n  if (NM_SRC(self) != NM_STORAGE(self)) {\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    rb_raise(rb_eNotImpError, \"must be called on a real matrix and not a slice\");\n  }\n\n  // i, jv, vv are mandatory; pos is optional; thus \"31\"\n  VALUE i_, jv, vv, pos_;\n  rb_scan_args(argc, argv, \"31\", &i_, &jv, &vv, &pos_);\n  NM_CONSERVATIVE(nm_register_value(&i_));\n  NM_CONSERVATIVE(nm_register_value(&jv));\n  NM_CONSERVATIVE(nm_register_value(&vv));\n  NM_CONSERVATIVE(nm_register_value(&pos_));\n\n  size_t len   = RARRAY_LEN(jv); // need length in order to read the arrays in\n  size_t vvlen = RARRAY_LEN(vv);\n\n  if (len != vvlen) {\n    NM_CONSERVATIVE(nm_unregister_value(&pos_));\n    NM_CONSERVATIVE(nm_unregister_value(&vv));\n    NM_CONSERVATIVE(nm_unregister_value(&jv));\n    NM_CONSERVATIVE(nm_unregister_value(&i_));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    rb_raise(rb_eArgError, \"lengths must match between j array (%lu) and value array (%lu)\", len, vvlen);\n  }\n\n  YALE_STORAGE* s   = NM_STORAGE_YALE(self);\n  nm::dtype_t dtype = NM_DTYPE(self);\n\n  size_t i   = FIX2INT(i_);    // get the row\n  size_t pos = s->ija[i];\n\n  // Allocate the j array and the values array\n  size_t* j  = NM_ALLOCA_N(size_t, len);\n  void* vals = NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * len);\n  if (dtype == nm::RUBYOBJ){\n    nm_register_values(reinterpret_cast<VALUE*>(vals), len);\n  }\n\n  // Copy array contents\n  for (size_t idx = 0; idx < len; ++idx) {\n    j[idx] = FIX2INT(rb_ary_entry(jv, idx));\n    rubyval_to_cval(rb_ary_entry(vv, idx), dtype, (char*)vals + idx * DTYPE_SIZES[dtype]);\n  }\n\n  nm_yale_storage_vector_insert(s, pos, j, vals, len, false, dtype);\n  nm_yale_storage_increment_ia_after(s, 
s->shape[0], i, len);\n  s->ndnz += len;\n\n  if (dtype == nm::RUBYOBJ){\n    nm_unregister_values(reinterpret_cast<VALUE*>(vals), len);\n  }\n\n  NM_CONSERVATIVE(nm_unregister_value(&pos_));\n  NM_CONSERVATIVE(nm_unregister_value(&vv));\n  NM_CONSERVATIVE(nm_unregister_value(&jv));\n  NM_CONSERVATIVE(nm_unregister_value(&i_));\n  NM_CONSERVATIVE(nm_unregister_value(&self));\n\n  // Return the updated position\n  pos += len;\n  return INT2FIX(pos);\n}\n\n\n\n\n/*\n * call-seq:\n *     __yale_default_value__ -> ...\n *\n * Get the default_value property from a yale matrix.\n */\nVALUE nm_yale_default_value(VALUE self) {\n  VALUE to_return = default_value(NM_STORAGE_YALE(self));\n  return to_return;\n}\n\n\n/*\n * call-seq:\n *     __yale_map_merged_stored__(right) -> Enumerator\n *\n * A map operation on two Yale matrices which only iterates across the stored indices.\n */\nVALUE nm_yale_map_merged_stored(VALUE left, VALUE right, VALUE init) {\n  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::map_merged_stored, VALUE, VALUE, VALUE, VALUE)\n  return ttable[NM_DTYPE(left)][NM_DTYPE(right)](left, right, init);\n  //return nm::yale_storage::map_merged_stored(left, right, init);\n}\n\n\n/*\n * call-seq:\n *     __yale_map_stored__ -> Enumerator\n *\n * A map operation on two Yale matrices which only iterates across the stored indices.\n */\nVALUE nm_yale_map_stored(VALUE self) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::map_stored, VALUE, VALUE)\n  return ttable[NM_DTYPE(self)](self);\n}\n\n} // end of extern \"C\" block\n"
  },
  {
    "path": "ext/nmatrix/storage/yale/yale.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == yale.h\n//\n// \"new yale\" storage format for 2D matrices (like yale, but with\n// the diagonal pulled out for O(1) access).\n//\n// Specifications:\n// * dtype and index dtype must necessarily differ\n//      * index dtype is defined by whatever unsigned type can store\n//        max(rows,cols)\n//      * that means vector ija stores only index dtype, but a stores\n//        dtype\n// * vectors must be able to grow as necessary\n//      * maximum size is rows*cols+1\n\n#ifndef YALE_H\n#define YALE_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <limits> // for std::numeric_limits<T>::max()\n#include <stdexcept>\n\n/*\n * Project Includes\n */\n\n#include \"../../types.h\"\n#include \"../../data/data.h\"\n#include \"../common.h\"\n#include \"../../nmatrix.h\"\n\nextern \"C\" {\n\n  /*\n   * Macros\n   */\n\n  #ifndef NM_CHECK_ALLOC\n   #define NM_CHECK_ALLOC(x) if (!x) rb_raise(rb_eNoMemError, \"insufficient memory\");\n  #endif\n\n  /*\n   * Types\n   */\n\n\n  /*\n   * Data\n   */\n\n\n  /*\n   * Functions\n   */\n\n  ///////////////\n  // Lifecycle //\n  ///////////////\n\n  YALE_STORAGE* nm_yale_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, size_t 
init_capacity);\n  YALE_STORAGE* nm_yale_storage_create_from_old_yale(nm::dtype_t dtype, size_t* shape, char* ia, char* ja, char* a, nm::dtype_t from_dtype);\n  YALE_STORAGE*  nm_yale_storage_create_merged(const YALE_STORAGE* merge_template, const YALE_STORAGE* other);\n  void          nm_yale_storage_delete(STORAGE* s);\n  void          nm_yale_storage_delete_ref(STORAGE* s);\n  void          nm_yale_storage_init(YALE_STORAGE* s, void* default_val);\n  void          nm_yale_storage_mark(STORAGE*);\n  void          nm_yale_storage_register(const STORAGE* s);\n  void          nm_yale_storage_unregister(const STORAGE* s);\n  void    nm_yale_storage_register_a(void* a, size_t size);\n  void    nm_yale_storage_unregister_a(void* a, size_t size); \n    \n  ///////////////\n  // Accessors //\n  ///////////////\n\n  VALUE nm_yale_each_with_indices(VALUE nmatrix);\n  VALUE nm_yale_each_stored_with_indices(VALUE nmatrix);\n  VALUE nm_yale_stored_diagonal_each_with_indices(VALUE nmatrix);\n  VALUE nm_yale_stored_nondiagonal_each_with_indices(VALUE nmatrix);\n  VALUE nm_yale_each_ordered_stored_with_indices(VALUE nmatrix);\n  void* nm_yale_storage_get(const STORAGE* s, SLICE* slice);\n  void*  nm_yale_storage_ref(const STORAGE* s, SLICE* slice);\n  void  nm_yale_storage_set(VALUE left, SLICE* slice, VALUE right);\n\n  //char  nm_yale_storage_vector_insert(YALE_STORAGE* s, size_t pos, size_t* js, void* vals, size_t n, bool struct_only, nm::dtype_t dtype, nm::itype_t itype);\n  //void  nm_yale_storage_increment_ia_after(YALE_STORAGE* s, size_t ija_size, size_t i, size_t n);\n\n  size_t  nm_yale_storage_get_size(const YALE_STORAGE* storage);\n  VALUE   nm_yale_default_value(VALUE self);\n  VALUE   nm_yale_map_stored(VALUE self);\n  VALUE   nm_yale_map_merged_stored(VALUE left, VALUE right, VALUE init);\n\n  ///////////\n  // Tests //\n  ///////////\n\n  bool nm_yale_storage_eqeq(const STORAGE* left, const STORAGE* right);\n\n  //////////\n  // Math //\n  //////////\n\n  STORAGE* 
nm_yale_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);\n\n  /////////////\n  // Utility //\n  /////////////\n\n\n\n  /////////////////////////\n  // Copying and Casting //\n  /////////////////////////\n\n  STORAGE*      nm_yale_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void*);\n  STORAGE*      nm_yale_storage_copy_transposed(const STORAGE* rhs_base);\n\n\n\n  void nm_init_yale_functions(void);\n\n  VALUE nm_vector_set(int argc, VALUE* argv, VALUE self);\n\n\n} // end of extern \"C\" block\n\nnamespace nm {\n\nnamespace yale_storage {\n\n  /*\n   * Typedefs\n   */\n\n  typedef size_t IType;\n\n\n  /*\n   * Templated Functions\n   */\n\n  int binary_search(YALE_STORAGE* s, IType left, IType right, IType key);\n\n  /*\n   * Clear out the D portion of the A vector (clearing the diagonal and setting\n   * the zero value).\n   *\n   * Note: This sets a literal 0 value. If your dtype is RUBYOBJ (a Ruby object),\n   * it'll actually be INT2FIX(0) instead of a string of NULLs. You can actually\n   * set a default for Ruby objects other than zero -- you generally want it to\n   * be Qfalse, Qnil, or INT2FIX(0). 
The last is the default.\n   */\n  template <typename DType>\n  inline void clear_diagonal_and_zero(YALE_STORAGE* s, void* init_val) {\n    DType* a = reinterpret_cast<DType*>(s->a);\n\n    // Clear out the diagonal + one extra entry\n    if (init_val) {\n      for (size_t i = 0; i <= s->shape[0]; ++i) // insert Ruby zeros, falses, or whatever else.\n        a[i] = *reinterpret_cast<DType*>(init_val);\n    } else {\n      for (size_t i = 0; i <= s->shape[0]; ++i) // insert zeros.\n        a[i] = 0;\n    }\n  }\n\n  template <typename DType>\n  void init(YALE_STORAGE* s, void* init_val);\n\n  size_t  get_size(const YALE_STORAGE* storage);\n\n  IType binary_search_left_boundary(const YALE_STORAGE* s, IType left, IType right, IType bound);\n\n\n}} // end of namespace nm::yale_storage\n\n#endif // YALE_H\n"
  },
  {
    "path": "ext/nmatrix/types.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == types.h\n//\n// Definition of simple types used throughout NMatrix.\n\n#ifndef NMATRIX_TYPES_H\n#define NMATRIX_TYPES_H\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <cstdint>\n\n/*\n * Macros\n */\n\n#define EPSILON 1E-10\n#define FP_IS_ZERO(n) (-EPSILON < n && n < EPSILON)\n#define FP_EQUAL(a, b) FP_IS_ZERO((a - b))\n\n/*\n * Types\n */\n\ntypedef float    float32_t;\ntypedef double  float64_t;\n\ntypedef size_t  IType;\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/util/io.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == io.cpp\n//\n// Input/output support functions.\n\n#include \"io.h\"\n\n#include <ruby.h>\n\nnamespace nm { namespace io {\n\n  const char* const MATLAB_DTYPE_NAMES[NUM_MATLAB_DTYPES] = {\n    \"miUNDEFINED0\",\n    \"miINT8\",\n    \"miUINT8\",\n    \"miINT16\",\n    \"miUINT16\",\n    \"miINT32\",\n    \"miUINT32\",\n    \"miSINGLE\",\n    \"miRESERVED8\",\n    \"miDOUBLE\",\n    \"miRESERVED10\",\n    \"miRESERVED11\",\n    \"miINT64\",\n    \"miUINT64\",\n    \"miMATRIX\"\n  };\n\n  const size_t MATLAB_DTYPE_SIZES[NUM_MATLAB_DTYPES] = {\n    1, // undefined\n    1, // int8\n    1, // uint8\n    2, // int16\n    2, // uint16\n    4, // int32\n    4, // uint32\n    sizeof(float),\n    1, // reserved\n    sizeof(double),\n    1, // reserved\n    1, // reserved\n    8, // int64\n    8, // uint64\n    1  // matlab array?\n  };\n\n\n/*\n * Templated function for converting from MATLAB dtypes to NMatrix dtypes.\n */\ntemplate <typename DType, typename MDType>\nchar* matlab_cstring_to_dtype_string(size_t& result_len, const char* str, size_t bytes) {\n\n  result_len   = sizeof(DType) * bytes / sizeof(MDType);\n  char* result = NM_ALLOC_N(char, result_len);\n\n  if (bytes % sizeof(MDType) != 0) {\n    
rb_raise(rb_eArgError, \"the given string does not divide evenly for the given MATLAB dtype\");\n  }\n\n  for (size_t i = 0, j = 0; i < bytes; i += sizeof(MDType), j += sizeof(DType)) {\n    *reinterpret_cast<DType*>(result+j) = (DType)(*reinterpret_cast<const MDType*>(str + i));\n  }\n\n  return result;\n}\n\n\n\n}} // end of namespace nm::io\n\nextern \"C\" {\n\n///////////////////////\n// Utility Functions //\n///////////////////////\n\n/*\n * Converts a string to a data type.\n */\nnm::dtype_t nm_dtype_from_rbstring(VALUE str) {\n\n  for (size_t index = 0; index < NM_NUM_DTYPES; ++index) {\n    if (!std::strncmp(RSTRING_PTR(str), DTYPE_NAMES[index], RSTRING_LEN(str))) {\n      return static_cast<nm::dtype_t>(index);\n    }\n  }\n\n  rb_raise(rb_eArgError, \"invalid data type string (%s) specified\", RSTRING_PTR(str));\n}\n\n\n/*\n * Converts a symbol to a data type.\n */\nnm::dtype_t nm_dtype_from_rbsymbol(VALUE sym) {\n  ID sym_id = SYM2ID(sym);\n\n  for (size_t index = 0; index < NM_NUM_DTYPES; ++index) {\n    if (sym_id == rb_intern(DTYPE_NAMES[index])) {\n      return static_cast<nm::dtype_t>(index);\n    }\n  }\n\n  VALUE str = rb_any_to_s(sym);\n  rb_raise(rb_eArgError, \"invalid data type symbol (:%s) specified\", RSTRING_PTR(str));\n}\n\n\n/*\n * Converts a string to a storage type. 
Only looks at the first three\n * characters.\n */\nnm::stype_t nm_stype_from_rbstring(VALUE str) {\n\n  for (size_t index = 0; index < NM_NUM_STYPES; ++index) {\n    if (!std::strncmp(RSTRING_PTR(str), STYPE_NAMES[index], 3)) {\n      return static_cast<nm::stype_t>(index);\n    }\n  }\n\n  rb_raise(rb_eArgError, \"Invalid storage type string specified\");\n  return nm::DENSE_STORE;\n}\n\n/*\n * Converts a symbol to a storage type.\n */\nnm::stype_t nm_stype_from_rbsymbol(VALUE sym) {\n\n  for (size_t index = 0; index < NM_NUM_STYPES; ++index) {\n    if (SYM2ID(sym) == rb_intern(STYPE_NAMES[index])) {\n      return static_cast<nm::stype_t>(index);\n    }\n  }\n\n  VALUE str = rb_any_to_s(sym);\n  rb_raise(rb_eArgError, \"invalid storage type symbol (:%s) specified\", RSTRING_PTR(str));\n  return nm::DENSE_STORE;\n}\n\n\n/*\n * Converts a MATLAB data-type symbol to an enum.\n */\nstatic nm::io::matlab_dtype_t matlab_dtype_from_rbsymbol(VALUE sym) {\n  for (size_t index = 0; index < nm::io::NUM_MATLAB_DTYPES; ++index) {\n    if (SYM2ID(sym) == rb_intern(nm::io::MATLAB_DTYPE_NAMES[index])) {\n      return static_cast<nm::io::matlab_dtype_t>(index);\n    }\n  }\n\n  rb_raise(rb_eArgError, \"Invalid matlab type specified.\");\n}\n\n\n/*\n * Take a string of bytes which represent MATLAB data type values and repack them into a string\n * of bytes representing values of an NMatrix dtype (or itype).\n *\n * Returns what appears to be a Ruby String.\n *\n * Arguments:\n * * str        :: the data\n * * from       :: symbol representing MATLAB data type (e.g., :miINT8)\n * * type       :: either :itype or some dtype symbol (:byte, :uint32, etc)\n */\nstatic VALUE nm_rbstring_matlab_repack(VALUE self, VALUE str, VALUE from, VALUE type) {\n  nm::io::matlab_dtype_t from_type = matlab_dtype_from_rbsymbol(from);\n  uint8_t to_type;\n\n  if (SYMBOL_P(type)) {\n    if (rb_to_id(type) == rb_intern(\"itype\")) {\n      if (sizeof(size_t) == sizeof(int64_t)) {\n        to_type = 
static_cast<int8_t>(nm::INT64);\n      } else if (sizeof(size_t) == sizeof(int32_t)) {\n        to_type = static_cast<int8_t>(nm::INT32);\n      } else if (sizeof(size_t) == sizeof(int16_t)) {\n        to_type = static_cast<int8_t>(nm::INT16);\n      } else {\n        rb_raise(rb_eStandardError, \"unhandled size_t definition\");\n      }\n    } else {\n      to_type = static_cast<uint8_t>(nm_dtype_from_rbsymbol(type));\n    }\n  } else {\n    rb_raise(rb_eArgError, \"expected symbol for third argument\");\n  }\n\n  // For next few lines, see explanation above NM_MATLAB_DTYPE_TEMPLATE_TABLE definition in io.h.\n  if (to_type >= static_cast<uint8_t>(nm::COMPLEX64)) {\n    rb_raise(rb_eArgError, \"can only repack into a simple dtype, no complex/VALUE\");\n  }\n\n  // Do the actual repacking -- really simple!\n  NM_MATLAB_DTYPE_TEMPLATE_TABLE(ttable, nm::io::matlab_cstring_to_dtype_string, char*, size_t& result_len, const char* str, size_t bytes);\n\n  size_t repacked_data_length;\n  char* repacked_data = ttable[to_type][from_type](repacked_data_length, RSTRING_PTR(str), RSTRING_LEN(str));\n\n  // Encode as 8-bit ASCII with a length -- don't want to hiccup on \\0\n  VALUE result = rb_str_new(repacked_data, repacked_data_length);\n  NM_FREE(repacked_data); // Don't forget to free what we allocated!\n\n  return result;\n}\n\n\n/*\n * Take two byte-strings (real and imaginary) and treat them as if they contain\n * a sequence of data of type dtype. 
Merge them together and return a new string.\n */\nstatic VALUE nm_rbstring_merge(VALUE self, VALUE rb_real, VALUE rb_imaginary, VALUE rb_dtype) {\n\n  // Sanity check.\n  if (RSTRING_LEN(rb_real) != RSTRING_LEN(rb_imaginary)) {\n    rb_raise(rb_eArgError, \"real and imaginary components do not have same length\");\n  }\n\n  nm::dtype_t dtype = nm_dtype_from_rbsymbol(rb_dtype);\n  size_t len        = DTYPE_SIZES[dtype];\n\n  char *real        = RSTRING_PTR(rb_real),\n       *imag        = RSTRING_PTR(rb_imaginary);\n\n  char* merge       = NM_ALLOCA_N(char, RSTRING_LEN(rb_real)*2);\n\n  size_t merge_pos  = 0;\n\n  // Merge the two sequences\n  for (size_t i = 0; i < (size_t)RSTRING_LEN(rb_real); i += len) {\n\n    // Copy real number\n    memcpy(merge + merge_pos, real + i, len);\n    merge_pos += len;\n\n    // Copy imaginary number\n    memcpy(merge + merge_pos, imag + i, len);\n    merge_pos += len;\n  }\n\n  return rb_str_new(merge, merge_pos);\n}\n\n\nvoid nm_init_io() {\n  cNMatrix_IO = rb_define_module_under(cNMatrix, \"IO\");\n  cNMatrix_IO_Matlab = rb_define_module_under(cNMatrix_IO, \"Matlab\");\n\n  rb_define_singleton_method(cNMatrix_IO_Matlab, \"repack\", (METHOD)nm_rbstring_matlab_repack, 3);\n  rb_define_singleton_method(cNMatrix_IO_Matlab, \"complex_merge\", (METHOD)nm_rbstring_merge, 3);\n}\n\n\n\n}\n"
  },
  {
    "path": "ext/nmatrix/util/io.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == io.h\n//\n// Header file for input/output support functions.\n\n#ifndef NMATRIX_IO_H\n#define NMATRIX_IO_H\n\n/*\n * Project Includes\n */\n\n#include \"nmatrix.h\"\n\n#include \"data/data.h\"\n#include \"storage/storage.h\"\n\n/*\n * Extern Types\n */\nextern const char* const DTYPE_NAMES[nm::NUM_DTYPES];\n\nnamespace nm { namespace io {\n  /*\n   * Types\n   */\n  enum matlab_dtype_t {\n    miINT8 = 1,\n    miUINT8 = 2,\n    miINT16 = 3,\n    miUINT16 = 4,\n    miINT32 = 5,\n    miUINT32 = 6,\n    miSINGLE = 7,\n    miDOUBLE = 9,\n    miINT64 = 12,\n    miUINT64 = 13,\n    miMATRIX = 14\n  };\n\n  /*\n   * Constants\n   */\n\n  const size_t NUM_MATLAB_DTYPES = 15;\n}} // end of namespace nm::io\n\nextern \"C\" {\n\n  /*\n   * C accessors.\n   */\n  nm::dtype_t nm_dtype_from_rbsymbol(VALUE sym);\n  nm::dtype_t nm_dtype_from_rbstring(VALUE str);\n  nm::stype_t nm_stype_from_rbsymbol(VALUE sym);\n  nm::stype_t nm_stype_from_rbstring(VALUE str);\n\n  void nm_init_io(void);\n\n\n  /*\n   * Macros.\n   */\n\n  /*\n   * Macro for a function pointer table between NMatrix dtypes and MATLAB dtypes.\n   *\n   * You can't convert as freely between these two as you can between NMatrix dtypes, but there's 
no reason to. MATLAB\n   * stores its complex numbers in two separate arrays, for example, not as a single unit of data. If you want to convert\n   * to a VALUE, convert first to an appropriate integer or float type.\n   *\n   * FIXME: Maybe be a little more selective about which conversions we DO allow. This is really just for loading an\n   * already-constructed MATLAB matrix into memory, and most of these functions will never get called.\n   */\n  #define NM_MATLAB_DTYPE_TEMPLATE_TABLE(name,fun,ret,...)    \\\n  static ret (*(name)[7][nm::io::NUM_MATLAB_DTYPES])(__VA_ARGS__) = {  \\\n      { NULL, fun<uint8_t,int8_t>, fun<uint8_t,uint8_t>, fun<uint8_t,int16_t>, fun<uint8_t,uint16_t>, fun<uint8_t,int32_t>, fun<uint8_t,uint32_t>, fun<uint8_t,float>, NULL, fun<uint8_t,double>, NULL, NULL, fun<uint8_t,int64_t>, fun<uint8_t,uint64_t>, NULL },  \\\n      { NULL, fun<int8_t,int8_t>, fun<int8_t,uint8_t>, fun<int8_t,int16_t>, fun<int8_t,uint16_t>, fun<int8_t,int32_t>, fun<int8_t,uint32_t>, fun<int8_t,float>, NULL, fun<int8_t,double>, NULL, NULL, fun<int8_t,int64_t>, fun<int8_t,uint64_t>, NULL },            \\\n      { NULL, fun<int16_t,int8_t>, fun<int16_t,uint8_t>, fun<int16_t,int16_t>, fun<int16_t,uint16_t>, fun<int16_t,int32_t>, fun<int16_t,uint32_t>, fun<int16_t,float>, NULL, fun<int16_t,double>, NULL, NULL, fun<int16_t,int64_t>, fun<int16_t,uint64_t>, NULL },  \\\n      { NULL, fun<int32_t,int8_t>, fun<int32_t,uint8_t>, fun<int32_t,int16_t>, fun<int32_t,uint16_t>, fun<int32_t,int32_t>, fun<int32_t,uint32_t>, fun<int32_t,float>, NULL, fun<int32_t,double>, NULL, NULL, fun<int32_t,int64_t>, fun<int32_t,uint64_t>, NULL },  \\\n      { NULL, fun<int64_t,int8_t>, fun<int64_t,uint8_t>, fun<int64_t,int16_t>, fun<int64_t,uint16_t>, fun<int64_t,int32_t>, fun<int64_t,uint32_t>, fun<int64_t,float>, NULL, fun<int64_t,double>, NULL, NULL, fun<int64_t,int64_t>, fun<int64_t,uint64_t>, NULL },  \\\n      { NULL, fun<float,int8_t>, fun<float,uint8_t>, fun<float,int16_t>, 
fun<float,uint16_t>, fun<float,int32_t>, fun<float,uint32_t>, fun<float,float>, NULL, fun<float,double>, NULL, NULL, fun<float,int64_t>, fun<float,uint64_t>, NULL },                      \\\n      { NULL, fun<double,int8_t>, fun<double,uint8_t>, fun<double,int16_t>, fun<double,uint16_t>, fun<double,int32_t>, fun<double,uint32_t>, fun<double,float>, NULL, fun<double,double>, NULL, NULL, fun<double,int64_t>, fun<double,uint64_t>, NULL }             \\\n    };\n\n  /*\n   * Hash#has_key? for symbols. Arguments are: hash (VALUE), string (char*).\n   */\n  #define RB_HASH_HAS_SYMBOL_KEY(hash, str)   (rb_funcall((hash), rb_intern(\"has_key?\"), 1, ID2SYM(rb_intern(str))) == Qtrue)\n}\n\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix/util/sl_list.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == sl_list.cpp\n//\n// Singly-linked list implementation\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n\n#include \"data/data.h\"\n\n#include \"sl_list.h\"\n\n#include \"storage/list/list.h\"\n\nnamespace nm { namespace list {\n\n/*\n * Macros\n */\n\n#ifndef RHASH_SET_IFNONE\n#define RHASH_SET_IFNONE(h, v) (RHASH(h)->ifnone = (v))\n#endif\n\n/*\n * Global Variables\n */\n \n\n/*\n * Forward Declarations\n */\n\n/*\n * Functions\n */\n\n////////////////\n// Lifecycle //\n///////////////\n\n/*\n * Creates an empty linked list.\n */\nLIST* create(void) {\n  LIST* list = NM_ALLOC( LIST );\n  list->first = NULL;\n  return list;\n}\n\n/*\n * Deletes the linked list and all of its contents. If you want to delete a\n * list inside of a list, set recursions to 1. For lists inside of lists inside\n *  of the list, set it to 2; and so on. 
Setting it to 0 is for no recursions.\n */\nvoid del(LIST* list, size_t recursions) {\n  NODE* next;\n  NODE* curr = list->first;\n\n  while (curr != NULL) {\n    next = curr->next;\n\n    if (recursions == 0) {\n      //fprintf(stderr, \"    free_val: %p\\n\", curr->val);\n      nm_list_storage_completely_unregister_node(curr);\n      NM_FREE(curr->val);\n      \n    } else {\n      //fprintf(stderr, \"    free_list: %p\\n\", list);\n      del((LIST*)curr->val, recursions - 1);\n    }\n\n    NM_FREE(curr);\n    curr = next;\n  }\n  //fprintf(stderr, \"    free_list: %p\\n\", list);\n  NM_FREE(list);\n}\n\n/*\n * Documentation goes here.\n */\nvoid mark(LIST* list, size_t recursions) {\n  NODE* next;\n  NODE* curr = list->first;\n\n  while (curr != NULL) {\n    next = curr->next;\n    \n    if (recursions == 0) {\n      rb_gc_mark(*((VALUE*)(curr->val)));\n      \n    } else {\n      mark((LIST*)curr->val, recursions - 1);\n    }\n    \n    curr = next;\n  }\n}\n\n///////////////\n// Accessors //\n///////////////\n\n\n/*\n * Given a list, insert key/val as the first entry in the list. Does not do any\n * checks, just inserts.\n */\nNODE* insert_first_node(LIST* list, size_t key, void* val, size_t val_size) {\n  NODE* ins   = NM_ALLOC(NODE);\n  ins->next   = list->first;\n\n  void* val_copy = NM_ALLOC_N(char, val_size);\n  memcpy(val_copy, val, val_size);\n\n  ins->val    = reinterpret_cast<void*>(val_copy);\n  ins->key    = key;\n  list->first = ins;\n\n  return ins;\n}\n\nNODE* insert_first_list(LIST* list, size_t key, LIST* l) {\n  NODE* ins   = NM_ALLOC(NODE);\n  ins->next   = list->first;\n\n  ins->val    = reinterpret_cast<void*>(l);\n  ins->key    = key;\n  list->first = ins;\n\n  return ins;\n}\n\n\n/* \n * Given a list and a key/value-ptr pair, create a node (and return that node).\n * If NULL is returned, it means insertion failed.\n * If the key already exists in the list, replace tells it to delete the old\n * value and put in your new one. 
!replace means delete the new value.\n */\nNODE* insert(LIST* list, bool replace, size_t key, void* val) {\n  NODE *ins;\n\n  if (list->first == NULL) {\n    // List is empty\n    \n    //if (!(ins = malloc(sizeof(NODE)))) return NULL;\n    ins = NM_ALLOC(NODE);\n    ins->next             = NULL;\n    ins->val              = val;\n    ins->key              = key;\n    list->first           = ins;\n    \n    return ins;\n\n  } else if (key < list->first->key) {\n    // Goes at the beginning of the list\n    \n    //if (!(ins = malloc(sizeof(NODE)))) return NULL;\n    ins = NM_ALLOC(NODE);\n    ins->next             = list->first;\n    ins->val              = val;\n    ins->key              = key;\n    list->first           = ins;\n    \n    return ins;\n  }\n\n  // Goes somewhere else in the list.\n  ins = find_nearest_from(list->first, key);\n\n  if (ins->key == key) {\n    // key already exists\n    if (replace) {\n      nm_list_storage_completely_unregister_node(ins);\n      NM_FREE(ins->val);\n      ins->val = val;\n    } else {\n      NM_FREE(val);\n    }\n    \n    return ins;\n\n  } else {\n    return insert_after(ins, key, val);\n  }\n}\n\n\n\n/*\n * Documentation goes here.\n */\nNODE* insert_after(NODE* node, size_t key, void* val) {\n  //if (!(ins = malloc(sizeof(NODE)))) return NULL;\n  NODE* ins = NM_ALLOC(NODE);\n\n  // insert 'ins' between 'node' and 'node->next'\n  ins->next  = node->next;\n  node->next = ins;\n\n  // initialize our new node\n  ins->key   = key;\n  ins->val   = val;\n\n  return ins;\n}\n\n\n/*\n * Insert a new node immediately after +node+, or replace the existing one if its key is a match.\n */\nNODE* replace_insert_after(NODE* node, size_t key, void* val, bool copy, size_t copy_size) {\n  if (node->next && node->next->key == key) {\n\n    // Should we copy into the current one or free and insert?\n    if (copy) memcpy(node->next->val, val, copy_size);\n    else {\n      NM_FREE(node->next->val);\n      node->next->val = val;\n    
}\n\n    return node->next;\n\n  } else { // no next node, or if there is one, it's greater than the current key\n\n    if (copy) {\n      void* val_copy = NM_ALLOC_N(char, copy_size);\n      memcpy(val_copy, val, copy_size);\n      return insert_after(node, key, val_copy);\n    } else {\n      return insert_after(node, key, val);\n    }\n\n  }\n}\n\n\n\n/*\n * Functions analogously to list::insert but this inserts a copy of the value instead of the original.\n */\nNODE* insert_copy(LIST *list, bool replace, size_t key, void *val, size_t size) {\n  void *copy_val = NM_ALLOC_N(char, size);\n  memcpy(copy_val, val, size);\n\n  return insert(list, replace, key, copy_val);\n}\n\n\n/*\n * Returns the value pointer for some key. Doesn't free the memory for that value. Doesn't require a find operation,\n * assumes finding has already been done. If rm is the first item in the list, prev should be NULL.\n */\nvoid* remove_by_node(LIST* list, NODE* prev, NODE* rm) {\n  if (!prev)  list->first = rm->next;\n  else        prev->next  = rm->next;\n\n  void* val   = rm->val;\n  NM_FREE(rm);\n\n  return val;\n}\n\n\n/*\n * Returns the value pointer (not the node) for some key. Note that it doesn't\n * free the memory for the value stored in the node -- that pointer gets\n * returned! Only the node is destroyed.\n */\nvoid* remove_by_key(LIST* list, size_t key) {\n  NODE *f, *rm;\n  void* val;\n\n  if (!list->first || list->first->key > key) { // empty list or def. 
not present\n    return NULL;\n  }\n\n  if (list->first->key == key) {\n    val = list->first->val;\n    rm  = list->first;\n    \n    list->first = rm->next;\n    NM_FREE(rm);\n    \n    return val;\n  }\n\n  f = find_preceding_from_node(list->first, key);\n  if (!f || !f->next) { // not found, end of list\n    return NULL;\n  }\n\n  if (f->next->key == key) {\n    // remove the node\n    rm      = f->next;\n    f->next = rm->next;\n\n    // get the value and free the memory for the node\n    val = rm->val;\n    NM_FREE(rm);\n\n    return val;\n  }\n\n  return NULL; // not found, middle of list\n}\n\n\nbool node_is_within_slice(NODE* n, size_t coord, size_t len) {\n  if (!n) return false;\n  if (n->key >= coord && n->key < coord + len) return true;\n  else return false;\n}\n\n\n/*\n * Recursive removal of lists that may contain sub-lists. Stores the value ultimately removed in rm.\n */\nbool remove_recursive(LIST* list, const size_t* coords, const size_t* offsets, const size_t* lengths, size_t r, const size_t& dim) {\n//  std::cerr << \"remove_recursive: \" << r << std::endl;\n  // find the current coordinates in the list\n  NODE* prev    = find_preceding_from_list(list, coords[r] + offsets[r]);\n  NODE* n;\n  if (prev) n  = prev->next && node_is_within_slice(prev->next, coords[r] + offsets[r], lengths[r]) ? prev->next : NULL;\n  else      n  = node_is_within_slice(list->first, coords[r] + offsets[r], lengths[r]) ? 
list->first : NULL;\n\n  if (r < dim-1) { // nodes here are lists\n\n    while (n) {\n      // from that sub-list, call remove_recursive.\n      bool remove_parent = remove_recursive(reinterpret_cast<LIST*>(n->val), coords, offsets, lengths, r+1, dim);\n\n      if (remove_parent) { // now empty -- so remove the sub-list\n//        std::cerr << r << \": removing parent list at \" << n->key << std::endl;\n        NM_FREE(remove_by_node(list, prev, n));\n\n        if (prev) n  = prev->next && node_is_within_slice(prev->next, coords[r] + offsets[r], lengths[r]) ? prev->next : NULL;\n        else      n  = node_is_within_slice(list->first, coords[r] + offsets[r], lengths[r]) ? list->first : NULL;\n      } else {\n        // Move forward to next node (list at n still exists)\n        prev         = n;\n        n            = prev->next && node_is_within_slice(prev->next, coords[r] + offsets[r], lengths[r]) ? prev->next : NULL;\n      }\n\n      // Iterate to next one.\n      if (prev) n  = prev->next && node_is_within_slice(prev->next, coords[r] + offsets[r], lengths[r]) ? prev->next : NULL;\n      else      n  = node_is_within_slice(list->first, coords[r] + offsets[r], lengths[r]) ? list->first : NULL;\n    }\n\n  } else { // nodes here are not lists, but actual values\n\n    while (n) {\n//      std::cerr << r << \": removing node at \" << n->key << std::endl;\n      NM_FREE(remove_by_node(list, prev, n));\n\n      if (prev) n  = prev->next && node_is_within_slice(prev->next, coords[r] + offsets[r], lengths[r]) ? prev->next : NULL;\n      else      n  = node_is_within_slice(list->first, coords[r] + offsets[r], lengths[r]) ? 
list->first : NULL;\n    }\n  }\n\n  if (!list->first) return true; // if current list is now empty, signal its removal\n\n  return false;\n}\n\n///////////\n// Tests //\n///////////\n\n\n/////////////\n// Utility //\n/////////////\n\n/*\n * Find some element in the list and return the node ptr for that key.\n */\nNODE* find(LIST* list, size_t key) {\n  NODE* f;\n  if (!list->first) {\n    // empty list -- does not exist\n    return NULL;\n  }\n\n  // see if we can find it.\n  f = find_nearest_from(list->first, key);\n  \n  if (!f || f->key == key) {\n    return f;\n  }\n  \n  return NULL;\n}\n\n\n\n/*\n * Find some element in the list and return the node ptr for that key.\n */\nNODE* find_with_preceding(LIST* list, size_t key, NODE*& prev) {\n  if (!prev) prev = list->first;\n  if (!prev) return NULL; // empty list, does not exist\n\n  if (prev->key == key) {\n    NODE* n = prev;\n    prev    = NULL;\n    return n;\n  }\n\n  while (prev->next && prev->next->key < key) {\n    prev = prev->next;\n  }\n\n  return prev->next;\n}\n\n\n\n\n/*\n * Finds the node that should go before whatever key we request, whether or not\n * that key is present.\n */\nNODE* find_preceding_from_node(NODE* prev, size_t key) {\n  NODE* curr = prev->next;\n\n  if (!curr || key <= curr->key) {\n    return prev;\n    \n  } else {\n    return find_preceding_from_node(curr, key);\n  }\n}\n\n\n/*\n * Returns NULL if the key being sought is first in the list or *should* be first in the list but is absent. Otherwise\n * returns the previous node to where that key is or should be.\n */\nNODE* find_preceding_from_list(LIST* l, size_t key) {\n  NODE* n = l->first;\n  if (!n || n->key >= key)  return NULL;\n  else                      return find_preceding_from_node(n, key);\n}\n\n/*\n * Finds the node or, if not present, the node that it should follow. 
NULL\n * indicates no preceding node.\n */\nNODE* find_nearest(LIST* list, size_t key) {\n  return find_nearest_from(list->first, key);\n}\n\n/*\n * Finds a node or the one immediately preceding it if it doesn't exist.\n */\nNODE* find_nearest_from(NODE* prev, size_t key) {\n  NODE* f;\n\n  if (prev && prev->key == key) {\n    return prev;\n  }\n\n  f = find_preceding_from_node(prev, key);\n\n  if (!f->next) { // key exceeds final node; return final node.\n    return f;\n    \n  } else if (key == f->next->key) { // node already present; return location\n    return f->next;\n\n  } else {\n    return f;\n  }\n}\n\n/////////////////////////\n// Copying and Casting //\n/////////////////////////\n\n\n/*\n * Copy the contents of a list.\n */\ntemplate <typename LDType, typename RDType>\nvoid cast_copy_contents(LIST* lhs, const LIST* rhs, size_t recursions) {\n  NODE *lcurr, *rcurr;\n\n  if (rhs->first) {\n    // copy head node\n    rcurr = rhs->first;\n    lcurr = lhs->first = NM_ALLOC( NODE );\n\n    while (rcurr) {\n      lcurr->key = rcurr->key;\n\n      if (recursions == 0) {\n        // contents is some kind of value\n\n        lcurr->val = NM_ALLOC( LDType );\n\n        *reinterpret_cast<LDType*>(lcurr->val) = *reinterpret_cast<RDType*>( rcurr->val );\n\n      } else {\n        // contents is a list\n\n        lcurr->val = NM_ALLOC( LIST );\n\n        cast_copy_contents<LDType, RDType>(\n          reinterpret_cast<LIST*>(lcurr->val),\n          reinterpret_cast<LIST*>(rcurr->val),\n          recursions-1\n        );\n      }\n\n      if (rcurr->next) {\n        lcurr->next = NM_ALLOC( NODE );\n\n      } else {\n        lcurr->next = NULL;\n      }\n\n      lcurr = lcurr->next;\n      rcurr = rcurr->next;\n    }\n\n  } else {\n    lhs->first = NULL;\n  }\n}\n\n}} // end of namespace nm::list\n\nextern \"C\" {\n\n  /*\n   * C access for copying the contents of a list.\n   */\n  void nm_list_cast_copy_contents(LIST* lhs, const LIST* rhs, nm::dtype_t lhs_dtype, 
nm::dtype_t rhs_dtype, size_t recursions) {\n    LR_DTYPE_TEMPLATE_TABLE(nm::list::cast_copy_contents, void, LIST*, const LIST*, size_t);\n\n    ttable[lhs_dtype][rhs_dtype](lhs, rhs, recursions);\n  }\n\n  /*\n   * Sets up a hash with an appropriate default values. That means that if recursions == 0, the default value is default_value,\n   * but if recursions == 1, the default value is going to be a hash with default value of default_value, and if recursions == 2,\n   * the default value is going to be a hash with default value of hash with default value of default_value, and so on.\n   * In other words, it's recursive.\n   */\n  static VALUE empty_list_to_hash(const nm::dtype_t dtype, size_t recursions, VALUE default_value) {\n    VALUE h = rb_hash_new();\n    if (recursions) {\n      RHASH_SET_IFNONE(h, empty_list_to_hash(dtype, recursions-1, default_value));\n    } else {\n      RHASH_SET_IFNONE(h, default_value);\n    }\n    return h;\n  }\n\n\n  /*\n   * Copy a list to a Ruby Hash\n   */\n  VALUE nm_list_copy_to_hash(const LIST* l, const nm::dtype_t dtype, size_t recursions, VALUE default_value) {\n\n    // Create a hash with default values appropriately specified for a sparse matrix.\n    VALUE h = empty_list_to_hash(dtype, recursions, default_value);\n\n    if (l->first) {\n      NODE* curr = l->first;\n\n      while (curr) {\n\n        size_t key = curr->key;\n\n        if (recursions == 0) { // content is some kind of value\n          rb_hash_aset(h, INT2FIX(key), nm::rubyobj_from_cval(curr->val, dtype).rval);\n        } else { // content is a list\n          rb_hash_aset(h, INT2FIX(key), nm_list_copy_to_hash(reinterpret_cast<const LIST*>(curr->val), dtype, recursions-1, default_value));\n        }\n\n        curr = curr->next;\n\n      }\n\n    }\n\n    return h;\n  }\n\n\n} // end of extern \"C\" block\n\n"
  },
  {
    "path": "ext/nmatrix/util/sl_list.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == sl_list.h\n//\n// Singly-linked list implementation used for List Storage.\n\n#ifndef SL_LIST_H\n#define SL_LIST_H\n\n\n/*\n * Standard Includes\n */\n\n#include <ruby.h>\n#include <type_traits>\n#include <cstdlib>\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n\n#include \"data/data.h\"\n\n#include \"nmatrix.h\"\n\nnamespace nm { namespace list {\n\n/*\n * Macros\n */\n\n/*\n * Types\n */\n\n/*\n * Data\n */\n \n\n/*\n * Functions\n */\n \n////////////////\n// Lifecycle //\n///////////////\n\nLIST*  create(void);\nvoid  del(LIST* list, size_t recursions);\nvoid  mark(LIST* list, size_t recursions);\n\n///////////////\n// Accessors //\n///////////////\n\nNODE* insert(LIST* list, bool replace, size_t key, void* val);\nNODE* insert_copy(LIST *list, bool replace, size_t key, void *val, size_t size);\nNODE* insert_first_node(LIST* list, size_t key, void* val, size_t val_size);\nNODE* insert_first_list(LIST* list, size_t key, LIST* l);\nNODE* insert_after(NODE* node, size_t key, void* val);\nNODE* replace_insert_after(NODE* node, size_t key, void* val, bool copy, size_t copy_size);\nvoid* remove(LIST* list, size_t key);\nvoid* remove_by_node(LIST* list, NODE* prev, NODE* rm);\nbool 
remove_recursive(LIST* list, const size_t* coords, const size_t* offset, const size_t* lengths, size_t r, const size_t& dim);\nbool node_is_within_slice(NODE* n, size_t coord, size_t len);\n\ntemplate <typename Type>\ninline NODE* insert_helper(LIST* list, NODE* node, size_t key, Type val) {\n  Type* val_mem = NM_ALLOC(Type);\n  *val_mem = val;\n  \n  if (node == NULL) {\n    return insert(list, false, key, val_mem);\n    \n  } else {\n    return insert_after(node, key, val_mem);\n  }\n}\n\ntemplate <typename Type>\ninline NODE* insert_helper(LIST* list, NODE* node, size_t key, Type* ptr) {\n  if (node == NULL) {\n    return insert(list, false, key, ptr);\n    \n  } else {\n    return insert_after(node, key, ptr);\n  }\n}\n\n///////////\n// Tests //\n///////////\n\n\n/////////////\n// Utility //\n/////////////\n\nNODE* find(LIST* list, size_t key);\nNODE* find_preceding_from_node(NODE* prev, size_t key);\nNODE* find_preceding_from_list(LIST* l, size_t key);\nNODE* find_nearest(LIST* list, size_t key);\nNODE* find_nearest_from(NODE* prev, size_t key);\n\n/////////////////////////\n// Copying and Casting //\n/////////////////////////\n\ntemplate <typename LDType, typename RDType>\nvoid cast_copy_contents(LIST* lhs, const LIST* rhs, size_t recursions);\n\n}} // end of namespace nm::list\n\nextern \"C\" {\n  void nm_list_cast_copy_contents(LIST* lhs, const LIST* rhs, nm::dtype_t lhs_dtype, nm::dtype_t rhs_dtype, size_t recursions);\n  VALUE nm_list_copy_to_hash(const LIST* l, const nm::dtype_t dtype, size_t recursions, VALUE default_value);\n} // end of extern \"C\" block\n\n#endif // SL_LIST_H\n"
  },
  {
    "path": "ext/nmatrix/util/util.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == util.h\n//\n// Header file for utility functions and data.\n\n#ifndef UTIL_H\n#define UTIL_H\n\n/*\n * Standard Includes\n */\n\n/*\n * Project Includes\n */\n\n#include \"types.h\"\n\n/*\n * Macros\n */\n\n/*\n * Types\n */\n\n/*\n * Data\n */\n\n/*\n * Functions\n */\nnamespace nm {\n  template <typename Type>\n  inline Type gcf(Type x, Type y) {\n    Type t;\n\n    if (x < 0) x = -x;\n    if (y < 0) y = -y;\n\n    if (x == 0) return y;\n    if (y == 0) return x;\n\n    while (x > 0) {\n      t = x;\n      x = y % x;\n      y = t;\n    }\n\n    return y;\n  }\n} // end of namespace nm\n\n\n#endif // UTIL_H\n"
  },
  {
    "path": "ext/nmatrix_atlas/extconf.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == extconf.rb\n#\n# This file checks for ATLAS and other necessary headers, and\n# generates a Makefile for compiling NMatrix.\n\nrequire \"nmatrix/mkmf\"\n\n#$INSTALLFILES = [['nmatrix.h', '$(archdir)'], ['nmatrix.hpp', '$(archdir)'], ['nmatrix_config.h', '$(archdir)'], ['nm_memory.h', '$(archdir)']]\nif /cygwin|mingw/ =~ RUBY_PLATFORM\n  #$INSTALLFILES << ['libnmatrix.a', '$(archdir)']\nend\n\n$DEBUG = true\n#not the right way to add this include directory\n$CFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix\",$CFLAGS].join(\" \")\n$CXXFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix\",$CXXFLAGS].join(\" \")\n$CPPFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix\",$CPPFLAGS].join(\" \")\n\n# When adding objects here, make sure their directories are included in CLEANOBJS down at the bottom of extconf.rb.\n# Why not just autogenerate this list from all .c/.cpp files in directory?\nbasenames = %w{nmatrix_atlas math_atlas}\n$objs = basenames.map { |b| \"#{b}.o\"   }\n$srcs = basenames.map { |b| \"#{b}.cpp\" }\n\n# The next line allows the user to supply --with-atlas-dir=/usr/local/atlas,\n# --with-atlas-lib or --with-atlas-include and tell the compiler where to look\n# for ATLAS. 
The same for all the others\n#\n#dir_config(\"clapack\", [\"/usr/local/atlas/include\"], [])\n#\n#\n\n# Is g++ having trouble finding your header files?\n# Try this:\n#   export C_INCLUDE_PATH=/usr/local/atlas/include\n#   export CPLUS_INCLUDE_PATH=/usr/local/atlas/include\n# (substituting in the path of your cblas.h and clapack.h for the path I used). -- JW 8/27/12\n\nidefaults = {lapack: [\"/usr/include/atlas\"],\n             cblas: [\"/usr/local/atlas/include\", \"/usr/include/atlas\"],\n             atlas: [\"/usr/local/atlas/include\", \"/usr/include/atlas\"]}\n\n# For some reason, if we try to look for /usr/lib64/atlas on a Mac OS X Mavericks system, and the directory does not\n# exist, it will give a linker error -- even if the lib dir is already correctly included with -L. So we need to check\n# that Dir.exists?(d) for each.\nldefaults = {lapack: [\"/usr/local/lib\", \"/usr/local/atlas/lib\", \"/usr/lib64/atlas\"].delete_if { |d| !Dir.exists?(d) },\n             cblas: [\"/usr/local/lib\", \"/usr/local/atlas/lib\", \"/usr/lib64/atlas\"].delete_if { |d| !Dir.exists?(d) },\n             atlas: [\"/usr/local/lib\", \"/usr/local/atlas/lib\", \"/usr/lib\", \"/usr/lib64/atlas\"].delete_if { |d| !Dir.exists?(d) }}\n\nif have_library(\"clapack\") # Usually only applies for Mac OS X\n  $libs += \" -lclapack \"\nend\n\nunless have_library(\"lapack\")\n  dir_config(\"lapack\", idefaults[:lapack], ldefaults[:lapack])\nend\n\nunless have_library(\"cblas\")\n  dir_config(\"cblas\", idefaults[:cblas], ldefaults[:cblas])\nend\n\nunless have_library(\"atlas\")\n  dir_config(\"atlas\", idefaults[:atlas], ldefaults[:atlas])\nend\n\n# If BLAS and LAPACK headers are in an atlas directory, prefer those. 
Otherwise,\n# we try our luck with the default location.\nif have_header(\"atlas/cblas.h\")\n  have_header(\"atlas/clapack.h\")\nelse\n  have_header(\"cblas.h\")\n  have_header(\"clapack.h\")\nend\n\n\n# Although have_func is supposed to take a list as its second argument, I find that it simply\n# applies a :to_s to the second arg and doesn't actually check each one. We may want to put\n# have_func calls inside an :each block which checks atlas/clapack.h, cblas.h, clapack.h, and\n# lastly lapack.h. On Ubuntu, it only works if I use atlas/clapack.h. --@mohawkjohn 8/20/14\nhave_func(\"clapack_dgetrf\", \"atlas/clapack.h\")\nhave_func(\"clapack_dgetri\", \"atlas/clapack.h\")\nhave_func(\"dgesvd_\", \"clapack.h\") # This may not do anything. dgesvd_ seems to be in LAPACK, not CLAPACK.\n\nhave_func(\"cblas_dgemm\", \"cblas.h\")\n\n#have_func(\"rb_scan_args\", \"ruby.h\")\n\n#find_library(\"lapack\", \"clapack_dgetrf\")\n#find_library(\"cblas\", \"cblas_dgemm\")\n#find_library(\"atlas\", \"ATL_dgemmNN\")\n# Order matters here: ATLAS has to go after LAPACK: http://mail.scipy.org/pipermail/scipy-user/2007-January/010717.html\n$libs += \" -llapack -lcblas -latlas \"\n#$libs += \" -lprofiler \"\n\ncreate_conf_h(\"nmatrix_atlas_config.h\")\ncreate_makefile(\"nmatrix_atlas\")\n\n# to clean up object files in subdirectories:\nopen('Makefile', 'a') do |f|\n  clean_objs_paths = %w{ }.map { |d| \"#{d}/*.#{CONFIG[\"OBJEXT\"]}\" }\n  f.write(\"CLEANOBJS := $(CLEANOBJS) #{clean_objs_paths.join(' ')}\")\nend\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas/cblas_templates_atlas.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == cblas_templaces_atlas.h\n//\n// Define template functions for calling CBLAS functions in the\n// nm::math::atlas namespace.\n//\n\n#ifndef CBLAS_TEMPLATES_ATLAS_H\n#define CBLAS_TEMPLATES_ATLAS_H\n\n//includes so we have access to internal implementations\n#include \"math/rotg.h\"\n#include \"math/rot.h\"\n#include \"math/asum.h\"\n#include \"math/nrm2.h\"\n#include \"math/imax.h\"\n#include \"math/scal.h\"\n#include \"math/gemv.h\"\n#include \"math/gemm.h\"\n#include \"math/trsm.h\"\n\nnamespace nm { namespace math { namespace atlas {\n\n//Add cblas templates in the correct namespace\n#include \"math/cblas_templates_core.h\"\n\n//Add complex specializations for rot and rotg. 
These cblas functions are not\n//part of the the standard CBLAS and so need to be in an nmatrix-atlas header.\ntemplate <>\ninline void rotg(Complex64* a, Complex64* b, Complex64* c, Complex64* s) {\n  cblas_crotg(a, b, c, s);\n}\n\ntemplate <>\ninline void rotg(Complex128* a, Complex128* b, Complex128* c, Complex128* s) {\n  cblas_zrotg(a, b, c, s);\n}\ntemplate <>\ninline void rot(const int N, Complex64* X, const int incX, Complex64* Y, const int incY, const float c, const float s) {\n  cblas_csrot(N, X, incX, Y, incY, c, s);\n}\n\ntemplate <>\ninline void rot(const int N, Complex128* X, const int incX, Complex128* Y, const int incY, const double c, const double s) {\n  cblas_zdrot(N, X, incX, Y, incY, c, s);\n}\n\n}}} //nm::math::atlas\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas/clapack_templates.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == clapack_templates.h\n//\n// Collection of functions used to call ATLAS CLAPACK functions\n// directly.\n//\n\n#ifndef CLAPACK_TEMPLATES_H\n#define CLAPACK_TEMPLATES_H\n\n//needed to get access to internal implementations\n#include \"math/getrf.h\"\n#include \"math/getrs.h\"\n\nnamespace nm { namespace math { namespace atlas {\n//The first group of functions are those for which we have internal implementations.\n//The internal implementations are defined in the ext/nmatrix/math directory\n//and are the non-specialized\n//forms of the template functions nm::math::whatever().\n//They are are called below for non-BLAS\n//types in the non-specialized form of the template nm::math::atlas::whatever().\n//The specialized forms call the appropriate clapack functions.\n\n//We also define the clapack_whatever() template\n//functions below, which just cast\n//their arguments to the appropriate types.\n\n\n//getrf\ntemplate <typename DType>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, DType* a, const int lda, int* ipiv) {\n  return nm::math::getrf<DType>(order, m, n, a, lda, ipiv);\n}\n\n//Apparently CLAPACK isn't available on OS X, so we only define these\n//specializations if 
available,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, float* a, const int lda, int* ipiv) {\n  return clapack_sgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, double* a, const int lda, int* ipiv) {\n  return clapack_dgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex64* a, const int lda, int* ipiv) {\n  return clapack_cgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex128* a, const int lda, int* ipiv) {\n  return clapack_zgetrf(order, m, n, a, lda, ipiv);\n}\n#endif\n\ntemplate <typename DType>\ninline int clapack_getrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, int* ipiv) {\n  return getrf<DType>(order, m, n, static_cast<DType*>(a), lda, ipiv);\n}\n\n//getrs\n/*\n * Solves a system of linear equations A*X = B with a general NxN matrix A using the LU factorization computed by GETRF.\n *\n * From ATLAS 3.8.0.\n */\ntemplate <typename DType>\ninline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const DType* A,\n           const int lda, const int* ipiv, DType* B, const int ldb)\n{\n  return nm::math::getrs<DType>(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const float* A,\n           const int lda, const int* ipiv, float* B, const int ldb)\n{\n  return clapack_sgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, 
const double* A,\n           const int lda, const int* ipiv, double* B, const int ldb)\n{\n  return clapack_dgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const Complex64* A,\n           const int lda, const int* ipiv, Complex64* B, const int ldb)\n{\n  return clapack_cgetrs(Order, Trans, N, NRHS, A, lda, ipiv, static_cast<void*>(B), ldb);\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const Complex128* A,\n           const int lda, const int* ipiv, Complex128* B, const int ldb)\n{\n  return clapack_zgetrs(Order, Trans, N, NRHS, A, lda, ipiv, static_cast<void*>(B), ldb);\n}\n#endif\n\ntemplate <typename DType>\ninline int clapack_getrs(const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans, const int n, const int nrhs,\n                         const void* a, const int lda, const int* ipiv, void* b, const int ldb) {\n  return getrs<DType>(order, trans, n, nrhs, static_cast<const DType*>(a), lda, ipiv, static_cast<DType*>(b), ldb);\n}\n\n\n//Functions without internal implementations below:\n\n//getri\ntemplate <typename DType>\ninline int getri(const enum CBLAS_ORDER order, const int n, DType* a, const int lda, const int* ipiv) {\n  rb_raise(rb_eNotImpError, \"getri not yet implemented for non-BLAS dtypes\");\n  return 0;\n}\n\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, float* a, const int lda, const int* ipiv) {\n  return clapack_sgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, double* a, const int lda, const int* ipiv) {\n  return clapack_dgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, Complex64* a, const int lda, const int* 
ipiv) {\n  return clapack_cgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, Complex128* a, const int lda, const int* ipiv) {\n  return clapack_zgetri(order, n, a, lda, ipiv);\n}\n#endif\n\ntemplate <typename DType>\ninline int clapack_getri(const enum CBLAS_ORDER order, const int n, void* a, const int lda, const int* ipiv) {\n  return getri<DType>(order, n, static_cast<DType*>(a), lda, ipiv);\n}\n\n//potrf\n/*\n * From ATLAS 3.8.0:\n *\n * Computes one of two LU factorizations based on the setting of the Order\n * parameter, as follows:\n * ----------------------------------------------------------------------------\n *                       Order == CblasColMajor\n * Column-major factorization of form\n *   A = P * L * U\n * where P is a row-permutation matrix, L is lower triangular with unit\n * diagonal elements (lower trapazoidal if M > N), and U is upper triangular\n * (upper trapazoidal if M < N).\n *\n * ----------------------------------------------------------------------------\n *                       Order == CblasRowMajor\n * Row-major factorization of form\n *   A = P * L * U\n * where P is a column-permutation matrix, L is lower triangular (lower\n * trapazoidal if M > N), and U is upper triangular with unit diagonals (upper\n * trapazoidal if M < N).\n *\n * ============================================================================\n * Let IERR be the return value of the function:\n *    If IERR == 0, successful exit.\n *    If (IERR < 0) the -IERR argument had an illegal value\n *    If (IERR > 0 && Order == CblasColMajor)\n *       U(i-1,i-1) is exactly zero.  The factorization has been completed,\n *       but the factor U is exactly singular, and division by zero will\n *       occur if it is used to solve a system of equations.\n *    If (IERR > 0 && Order == CblasRowMajor)\n *       L(i-1,i-1) is exactly zero.  
The factorization has been completed,\n *       but the factor L is exactly singular, and division by zero will\n *       occur if it is used to solve a system of equations.\n */\ntemplate <typename DType>\ninline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, DType* A, const int lda) {\n#if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H\n  rb_raise(rb_eNotImpError, \"not yet implemented for non-BLAS dtypes\");\n#else\n  rb_raise(rb_eNotImpError, \"only CLAPACK version implemented thus far\");\n#endif\n  return 0;\n}\n\n#if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, float* A, const int lda) {\n  return clapack_spotrf(order, uplo, N, A, lda);\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, double* A, const int lda) {\n  return clapack_dpotrf(order, uplo, N, A, lda);\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex64* A, const int lda) {\n  return clapack_cpotrf(order, uplo, N, A, lda);\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex128* A, const int lda) {\n  return clapack_zpotrf(order, uplo, N, A, lda);\n}\n#endif\n\ntemplate <typename DType>\ninline int clapack_potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {\n  return potrf<DType>(order, uplo, n, static_cast<DType*>(a), lda);\n}\n\n//potri\ntemplate <typename DType>\ninline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, DType* a, const int lda) {\n  rb_raise(rb_eNotImpError, \"potri not yet implemented for non-BLAS dtypes\");\n  return 0;\n}\n\n\n#if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, const enum 
CBLAS_UPLO uplo, const int n, float* a, const int lda) {\n  return clapack_spotri(order, uplo, n, a, lda);\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, double* a, const int lda) {\n  return clapack_dpotri(order, uplo, n, a, lda);\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, Complex64* a, const int lda) {\n  return clapack_cpotri(order, uplo, n, a, lda);\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, Complex128* a, const int lda) {\n  return clapack_zpotri(order, uplo, n, a, lda);\n}\n#endif\n\ntemplate <typename DType>\ninline int clapack_potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {\n  return potri<DType>(order, uplo, n, static_cast<DType*>(a), lda);\n}\n\n//potrs\n/*\n * Solves a system of linear equations A*X = B with a symmetric positive definite matrix A using the Cholesky factorization computed by POTRF.\n */\ntemplate <typename DType>\ninline int potrs(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const DType* A,\n           const int lda, DType* B, const int ldb)\n{\n#if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H\n  rb_raise(rb_eNotImpError, \"not yet implemented for non-BLAS dtypes\");\n#else\n  rb_raise(rb_eNotImpError, \"only CLAPACK version implemented thus far\");\n#endif\n}\n\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\ntemplate <>\ninline int potrs<float> (const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const float* A,\n           const int lda, float* B, const int ldb)\n{\n  return clapack_spotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);\n}\n\ntemplate <>\ninline int potrs<double>(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const double* A,\n           const int 
lda, double* B, const int ldb)\n{\n  return clapack_dpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);\n}\n\ntemplate <>\ninline int potrs<Complex64>(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const Complex64* A,\n           const int lda, Complex64* B, const int ldb)\n{\n  return clapack_cpotrs(Order, Uplo, N, NRHS, A, lda, static_cast<void *>(B), ldb);\n}\n\ntemplate <>\ninline int potrs<Complex128>(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const Complex128* A,\n           const int lda, Complex128* B, const int ldb)\n{\n  return clapack_zpotrs(Order, Uplo, N, NRHS, A, lda, static_cast<void *>(B), ldb);\n}\n#endif\n\ntemplate <typename DType>\ninline int clapack_potrs(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, const int nrhs,\n                         const void* a, const int lda, void* b, const int ldb) {\n  return potrs<DType>(order, uplo, n, nrhs, static_cast<const DType*>(a), lda, static_cast<DType*>(b), ldb);\n}\n\n}}}\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas/geev.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == geev.h\n//\n// Header file for interface with LAPACK's xGEEV functions.\n//\n\n#ifndef GEEV_H\n# define GEEV_H\n\nextern \"C\" {\n  void sgeev_(char* jobvl, char* jobvr, int* n, float* a,          int* lda, float* wr,  float* wi,  float* vl,          int* ldvl, float* vr,          int* ldvr, float* work,          int* lwork,                int* info);\n  void dgeev_(char* jobvl, char* jobvr, int* n, double* a,         int* lda, double* wr, double* wi, double* vl,         int* ldvl, double* vr,         int* ldvr, double* work,         int* lwork,                int* info);\n  void cgeev_(char* jobvl, char* jobvr, int* n, nm::Complex64* a,  int* lda, nm::Complex64* w,       nm::Complex64* vl,  int* ldvl, nm::Complex64* vr,  int* ldvr, nm::Complex64* work,  int* lwork, float* rwork,  int* info);\n  void zgeev_(char* jobvl, char* jobvr, int* n, nm::Complex128* a, int* lda, nm::Complex128* w,      nm::Complex128* vl, int* ldvl, nm::Complex128* vr, int* ldvr, nm::Complex128* work, int* lwork, double* rwork, int* info);\n}\n\nnamespace nm { namespace math { namespace atlas {\n\ntemplate <typename DType, typename CType>                         // wr\ninline int geev(char jobvl, char jobvr, int n, DType* 
a, int lda, DType* w, DType* wi, DType* vl, int ldvl, DType* vr, int ldvr, DType* work, int lwork, CType* rwork) {\n  rb_raise(rb_eNotImpError, \"not yet implemented for non-BLAS dtypes\");\n  return -1;\n}\n\ntemplate <>\ninline int geev(char jobvl, char jobvr, int n, float* a, int lda, float* w, float* wi, float* vl, int ldvl, float* vr, int ldvr, float* work, int lwork, float* rwork) {\n  int info;\n  sgeev_(&jobvl, &jobvr, &n, a, &lda, w, wi, vl, &ldvl, vr, &ldvr, work, &lwork, &info);\n  return info;\n}\n\ntemplate <>\ninline int geev(char jobvl, char jobvr, int n, double* a, int lda, double* w, double* wi, double* vl, int ldvl, double* vr, int ldvr, double* work, int lwork, double* rwork) {\n  int info;\n  dgeev_(&jobvl, &jobvr, &n, a, &lda, w, wi, vl, &ldvl, vr, &ldvr, work, &lwork, &info);\n  return info;\n}\n\ntemplate <>\ninline int geev(char jobvl, char jobvr, int n, Complex64* a, int lda, Complex64* w, Complex64* wi, Complex64* vl, int ldvl, Complex64* vr, int ldvr, Complex64* work, int lwork, float* rwork) {\n  int info;\n  cgeev_(&jobvl, &jobvr, &n, a, &lda, w, vl, &ldvl, vr, &ldvr, work, &lwork, rwork, &info);\n  return info;\n}\n\ntemplate <>\ninline int geev(char jobvl, char jobvr, int n, Complex128* a, int lda, Complex128* w, Complex128* wi, Complex128* vl, int ldvl, Complex128* vr, int ldvr, Complex128* work, int lwork, double* rwork) {\n  int info;\n  zgeev_(&jobvl, &jobvr, &n, a, &lda, w, vl, &ldvl, vr, &ldvr, work, &lwork, rwork, &info);\n  return info;\n}\n\ntemplate <typename DType, typename CType>\ninline int lapack_geev(char jobvl, char jobvr, int n, void* a, int lda, void* w, void* wi, void* vl, int ldvl, void* vr, int ldvr, void* work, int lwork, void* rwork) {\n  return geev<DType,CType>(jobvl, jobvr, n, reinterpret_cast<DType*>(a), lda, reinterpret_cast<DType*>(w), reinterpret_cast<DType*>(wi), reinterpret_cast<DType*>(vl), ldvl, reinterpret_cast<DType*>(vr), ldvr, reinterpret_cast<DType*>(work), lwork, 
reinterpret_cast<CType*>(rwork));\n}\n\n}}} // end nm::math::atlas\n\n#endif // GEEV_H\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas/gesdd.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == gesdd.h\n//\n// Header file for interface with LAPACK's xGESDD functions.\n//\n\n#ifndef GESDD_H\n# define GESDD_H\n\nextern \"C\" {\n\n  void sgesdd_(char*, int*, int*, float*, int*, float*, float*, int*, float*, int*, float*, int*, int*, int*);\n  void dgesdd_(char*, int*, int*, double*, int*, double*, double*, int*, double*, int*, double*, int*, int*, int*);\n  //the argument s is an array of real values and is returned as array of float/double\n  void cgesdd_(char*, int*, int*, nm::Complex64*, int*, float* s, nm::Complex64*, int*, nm::Complex64*, int*, nm::Complex64*, int*, float*, int*, int*);\n  void zgesdd_(char*, int*, int*, nm::Complex128*, int*, double* s, nm::Complex128*, int*, nm::Complex128*, int*, nm::Complex128*, int*, double*, int*, int*);\n}\n\nnamespace nm {\n  namespace math {\n  namespace atlas {\n\n    template <typename DType, typename CType>\n    inline int gesdd(char jobz, int m, int n, DType* a, int lda, CType* s, DType* u, int ldu, DType* vt, int ldvt, DType* work, int lwork, int* iwork, CType* rwork) {\n      rb_raise(rb_eNotImpError, \"not yet implemented for non-BLAS dtypes\");\n      return -1;\n    }\n\n    template <>\n    inline int gesdd(char jobz, int m, int n, 
float* a, int lda, float* s, float* u, int ldu, float* vt, int ldvt, float* work, int lwork, int* iwork, float* rwork) {\n      int info;\n      sgesdd_(&jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, iwork, &info);\n      return info;\n    }\n\n    template <>\n    inline int gesdd(char jobz, int m, int n, double* a, int lda, double* s, double* u, int ldu, double* vt, int ldvt, double* work, int lwork, int* iwork, double* rwork) {\n      int info;\n      dgesdd_(&jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, iwork, &info);\n      return info;\n    }\n\n    template <>\n    inline int gesdd(char jobz, int m, int n, nm::Complex64* a, int lda, float* s, nm::Complex64* u, int ldu, nm::Complex64* vt, int ldvt, nm::Complex64* work, int lwork, int* iwork, float* rwork) {\n      int info;\n      cgesdd_(&jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, rwork, iwork, &info);\n      return info;\n    }\n\n    template <>\n    inline int gesdd(char jobz, int m, int n, nm::Complex128* a, int lda, double* s, nm::Complex128* u, int ldu, nm::Complex128* vt, int ldvt, nm::Complex128* work, int lwork, int* iwork, double* rwork) {\n      int info;\n      zgesdd_(&jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, rwork, iwork, &info);\n      return info;\n    }\n\n  } // end of namespace atlas\n  } // end of namespace math\n} // end of namespace nm\n\n#endif // GESDD_H\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas/gesvd.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == gesvd.h\n//\n// Header file for interface with LAPACK's xGESVD functions.\n//\n\n#ifndef GESVD_H\n# define GESVD_H\n\nextern \"C\" {\n  void sgesvd_(char*, char*, int*, int*, float*, int*, float*, float*, int*, float*, int*, float*, int*, int*);\n  void dgesvd_(char*, char*, int*, int*, double*, int*, double*, double*, int*, double*, int*, double*, int*, int*);\n  //the argument s is an array of real values and is returned as array of float/double\n  void cgesvd_(char*, char*, int*, int*, nm::Complex64*, int*, float* s, nm::Complex64*, int*, nm::Complex64*, int*, nm::Complex64*, int*, float*, int*);\n  void zgesvd_(char*, char*, int*, int*, nm::Complex128*, int*, double* s, nm::Complex128*, int*, nm::Complex128*, int*, nm::Complex128*, int*, double*, int*);\n}\n\nnamespace nm {\n  namespace math {\n  namespace atlas {\n\n    template <typename DType, typename CType>\n    inline int gesvd(char jobu, char jobvt, int m, int n, DType* a, int lda, CType* s, DType* u, int ldu, DType* vt, int ldvt, DType* work, int lwork, CType* rwork) {\n      rb_raise(rb_eNotImpError, \"not yet implemented for non-BLAS dtypes\");\n      return -1;\n    }\n\n    template <>\n    inline int gesvd(char jobu, char jobvt, 
int m, int n, float* a, int lda, float* s, float* u, int ldu, float* vt, int ldvt, float* work, int lwork, float* rwork) {\n      int info;\n      sgesvd_(&jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, &info);\n      return info;\n    }\n\n    template <>\n    inline int gesvd(char jobu, char jobvt, int m, int n, double* a, int lda, double* s, double* u, int ldu, double* vt, int ldvt, double* work, int lwork, double* rwork) {\n      int info;\n      dgesvd_(&jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, &info);\n      return info;\n    }\n\n    template <>\n    inline int gesvd(char jobu, char jobvt, int m, int n, nm::Complex64* a, int lda, float* s, nm::Complex64* u, int ldu, nm::Complex64* vt, int ldvt, nm::Complex64* work, int lwork, float* rwork) {\n      int info;\n      cgesvd_(&jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, rwork, &info);\n      return info;\n    }\n\n    template <>\n    inline int gesvd(char jobu, char jobvt, int m, int n, nm::Complex128* a, int lda, double* s, nm::Complex128* u, int ldu, nm::Complex128* vt, int ldvt, nm::Complex128* work, int lwork, double* rwork) {\n      int info;\n      zgesvd_(&jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, rwork, &info);\n      return info;\n    }\n\n  } // end of namespace atlas\n  } // end of namespace math\n} // end of namespace nm\n#endif // GESVD_H\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas/inc.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == inc.h\n//\n// Includes needed for LAPACK, CLAPACK, and CBLAS functions.\n//\n\n#ifndef INC_H\n# define INC_H\n\n\nextern \"C\" { // These need to be in an extern \"C\" block or you'll get all kinds of undefined symbol errors.\n#if defined HAVE_CBLAS_H\n  #include <cblas.h>\n#elif defined HAVE_ATLAS_CBLAS_H\n  #include <atlas/cblas.h>\n#endif\n\n#if defined HAVE_CLAPACK_H\n  #include <clapack.h>\n#elif defined HAVE_ATLAS_CLAPACK_H\n  #include <atlas/clapack.h>\n#endif\n}\n\n#endif // INC_H\n"
  },
  {
    "path": "ext/nmatrix_atlas/math_atlas.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == math_atlas.cpp\n//\n// Ruby-exposed CBLAS and LAPACK functions that call ATLAS\n// functions.\n//\n\n/*\n * Project Includes\n */\n\n#include \"data/data.h\"\n\n#include \"math_atlas/inc.h\"\n\n#include \"math/util.h\"\n\n//BLAS\n#include \"math_atlas/cblas_templates_atlas.h\"\n\n//LAPACK\n#include \"math/laswp.h\"\n#include \"math_atlas/clapack_templates.h\"\n\n#include \"math_atlas/gesvd.h\"\n#include \"math_atlas/gesdd.h\"\n#include \"math_atlas/geev.h\"\n\n\n/*\n * Forward Declarations\n */\n\nextern \"C\" {\n  /* BLAS Level 1. */\n  static VALUE nm_atlas_cblas_scal(VALUE self, VALUE n, VALUE scale, VALUE vector, VALUE incx);\n  static VALUE nm_atlas_cblas_nrm2(VALUE self, VALUE n, VALUE x, VALUE incx);\n  static VALUE nm_atlas_cblas_asum(VALUE self, VALUE n, VALUE x, VALUE incx);\n  static VALUE nm_atlas_cblas_rot(VALUE self, VALUE n, VALUE x, VALUE incx, VALUE y, VALUE incy, VALUE c, VALUE s);\n  static VALUE nm_atlas_cblas_rotg(VALUE self, VALUE ab);\n  static VALUE nm_atlas_cblas_imax(VALUE self, VALUE n, VALUE x, VALUE incx);\n\n  /* BLAS Level 2. 
*/\n  static VALUE nm_atlas_cblas_gemv(VALUE self, VALUE trans_a, VALUE m, VALUE n, VALUE vAlpha, VALUE a, VALUE lda,\n                             VALUE x, VALUE incx, VALUE vBeta, VALUE y, VALUE incy);\n\n  /* BLAS Level 3. */\n  static VALUE nm_atlas_cblas_gemm(VALUE self, VALUE order, VALUE trans_a, VALUE trans_b, VALUE m, VALUE n, VALUE k, VALUE vAlpha,\n                             VALUE a, VALUE lda, VALUE b, VALUE ldb, VALUE vBeta, VALUE c, VALUE ldc);\n  static VALUE nm_atlas_cblas_trsm(VALUE self, VALUE order, VALUE side, VALUE uplo, VALUE trans_a, VALUE diag, VALUE m, VALUE n,\n                             VALUE vAlpha, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n  static VALUE nm_atlas_cblas_trmm(VALUE self, VALUE order, VALUE side, VALUE uplo, VALUE trans_a, VALUE diag, VALUE m, VALUE n,\n                             VALUE alpha, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n  static VALUE nm_atlas_cblas_herk(VALUE self, VALUE order, VALUE uplo, VALUE trans, VALUE n, VALUE k, VALUE alpha, VALUE a,\n                             VALUE lda, VALUE beta, VALUE c, VALUE ldc);\n  static VALUE nm_atlas_cblas_syrk(VALUE self, VALUE order, VALUE uplo, VALUE trans, VALUE n, VALUE k, VALUE alpha, VALUE a,\n                             VALUE lda, VALUE beta, VALUE c, VALUE ldc);\n\n  /* LAPACK. 
*/\n  static VALUE nm_atlas_has_clapack(VALUE self);\n  static VALUE nm_atlas_clapack_getrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda);\n  static VALUE nm_atlas_clapack_potrf(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda);\n  static VALUE nm_atlas_clapack_getrs(VALUE self, VALUE order, VALUE trans, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE ipiv, VALUE b, VALUE ldb);\n  static VALUE nm_atlas_clapack_potrs(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n  static VALUE nm_atlas_clapack_getri(VALUE self, VALUE order, VALUE n, VALUE a, VALUE lda, VALUE ipiv);\n  static VALUE nm_atlas_clapack_potri(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda);\n  static VALUE nm_atlas_clapack_laswp(VALUE self, VALUE n, VALUE a, VALUE lda, VALUE k1, VALUE k2, VALUE ipiv, VALUE incx);\n\n  static VALUE nm_atlas_lapack_gesvd(VALUE self, VALUE jobu, VALUE jobvt, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt, VALUE lworkspace_size);\n  static VALUE nm_atlas_lapack_gesdd(VALUE self, VALUE jobz, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt, VALUE lworkspace_size);\n  static VALUE nm_atlas_lapack_geev(VALUE self, VALUE compute_left, VALUE compute_right, VALUE n, VALUE a, VALUE lda, VALUE w, VALUE wi, VALUE vl, VALUE ldvl, VALUE vr, VALUE ldvr, VALUE lwork);\n}\n\n////////////////////\n// Math Functions //\n////////////////////\n\nnamespace nm { \n  namespace math {\n  namespace atlas {\n\n    /*\n     * Function signature conversion for calling CBLAS' gesvd functions as directly as possible.\n     */\n    template <typename DType, typename CType>\n    inline static int lapack_gesvd(char jobu, char jobvt, int m, int n, void* a, int lda, void* s, void* u, int ldu, void* vt, int ldvt, void* work, int lwork, void* rwork) {\n      return gesvd<DType,CType>(jobu, jobvt, m, n, 
reinterpret_cast<DType*>(a), lda, reinterpret_cast<CType*>(s), reinterpret_cast<DType*>(u), ldu, reinterpret_cast<DType*>(vt), ldvt, reinterpret_cast<DType*>(work), lwork, reinterpret_cast<CType*>(rwork));\n    }\n\n    /*\n     * Function signature conversion for calling CBLAS' gesdd functions as directly as possible.\n     */\n    template <typename DType, typename CType>\n    inline static int lapack_gesdd(char jobz, int m, int n, void* a, int lda, void* s, void* u, int ldu, void* vt, int ldvt, void* work, int lwork, int* iwork, void* rwork) {\n      return gesdd<DType,CType>(jobz, m, n, reinterpret_cast<DType*>(a), lda, reinterpret_cast<CType*>(s), reinterpret_cast<DType*>(u), ldu, reinterpret_cast<DType*>(vt), ldvt, reinterpret_cast<DType*>(work), lwork, iwork, reinterpret_cast<CType*>(rwork));\n    }\n\n\n  }\n  }\n}\n\nextern \"C\" {\n\n///////////////////\n// Ruby Bindings //\n///////////////////\n\nvoid nm_math_init_atlas() {\n  VALUE cNMatrix_ATLAS = rb_define_module_under(cNMatrix, \"ATLAS\");\n\n  rb_define_singleton_method(cNMatrix, \"has_clapack?\", (METHOD)nm_atlas_has_clapack, 0);\n\n  VALUE cNMatrix_ATLAS_LAPACK = rb_define_module_under(cNMatrix_ATLAS, \"LAPACK\");\n\n  /* ATLAS-CLAPACK Functions */\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_getrf\", (METHOD)nm_atlas_clapack_getrf, 5);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_potrf\", (METHOD)nm_atlas_clapack_potrf, 5);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_getrs\", (METHOD)nm_atlas_clapack_getrs, 9);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_potrs\", (METHOD)nm_atlas_clapack_potrs, 8);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_getri\", (METHOD)nm_atlas_clapack_getri, 5);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_potri\", (METHOD)nm_atlas_clapack_potri, 5);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"clapack_laswp\", (METHOD)nm_atlas_clapack_laswp, 7);\n\n  /* 
Non-ATLAS regular LAPACK Functions called via Fortran interface */\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"lapack_gesvd\", (METHOD)nm_atlas_lapack_gesvd, 12);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"lapack_gesdd\", (METHOD)nm_atlas_lapack_gesdd, 11);\n  rb_define_singleton_method(cNMatrix_ATLAS_LAPACK, \"lapack_geev\",  (METHOD)nm_atlas_lapack_geev,  12);\n\n  VALUE cNMatrix_ATLAS_BLAS = rb_define_module_under(cNMatrix_ATLAS, \"BLAS\");\n\n  //BLAS Level 1\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_scal\", (METHOD)nm_atlas_cblas_scal, 4);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_nrm2\", (METHOD)nm_atlas_cblas_nrm2, 3);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_asum\", (METHOD)nm_atlas_cblas_asum, 3);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_rot\",  (METHOD)nm_atlas_cblas_rot,  7);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_rotg\", (METHOD)nm_atlas_cblas_rotg, 1);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_imax\", (METHOD)nm_atlas_cblas_imax, 3);\n\n  //BLAS Level 2\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_gemv\", (METHOD)nm_atlas_cblas_gemv, 11);\n\n  //BLAS Level 3\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_gemm\", (METHOD)nm_atlas_cblas_gemm, 14);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_trsm\", (METHOD)nm_atlas_cblas_trsm, 12);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_trmm\", (METHOD)nm_atlas_cblas_trmm, 12);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_syrk\", (METHOD)nm_atlas_cblas_syrk, 11);\n  rb_define_singleton_method(cNMatrix_ATLAS_BLAS, \"cblas_herk\", (METHOD)nm_atlas_cblas_herk, 11);\n\n}\n\n/*\n * Simple way to check from within Ruby code if clapack functions are available, without\n * having to wait around for an exception to be thrown.\n */\nstatic VALUE nm_atlas_has_clapack(VALUE self) {\n#if defined (HAVE_CLAPACK_H) || defined 
(HAVE_ATLAS_CLAPACK_H)\n  return Qtrue;\n#else\n  return Qfalse;\n#endif\n}\n\n/*\n * call-seq:\n *     NMatrix::BLAS.cblas_scal(n, alpha, vector, inc) -> NMatrix\n *\n * BLAS level 1 function +scal+. Works with all dtypes.\n *\n * Scale +vector+ in-place by +alpha+ and also return it. The operation is as\n * follows:\n *  x <- alpha * x\n *\n * - +n+ -> Number of elements of +vector+.\n * - +alpha+ -> Scalar value used in the operation.\n * - +vector+ -> NMatrix of shape [n,1] or [1,n]. Modified in-place.\n * - +inc+ -> Increment used in the scaling function. Should generally be 1.\n */\nstatic VALUE nm_atlas_cblas_scal(VALUE self, VALUE n, VALUE alpha, VALUE vector, VALUE incx) {\n  nm::dtype_t dtype = NM_DTYPE(vector);\n\n  void* scalar = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, scalar);\n\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::atlas::cblas_scal, void, const int n,\n      const void* scalar, void* x, const int incx);\n\n  ttable[dtype](FIX2INT(n), scalar, NM_STORAGE_DENSE(vector)->elements,\n      FIX2INT(incx));\n\n  return vector;\n}\n\n/*\n * Call any of the cblas_xrotg functions as directly as possible.\n *\n * xROTG computes the elements of a Givens plane rotation matrix such that:\n *\n *  |  c s |   | a |   | r |\n *  | -s c | * | b | = | 0 |\n *\n * where r = +- sqrt( a**2 + b**2 ) and c**2 + s**2 = 1.\n *\n * The Givens plane rotation can be used to introduce zero elements into a matrix selectively.\n *\n * This function differs from most of the other raw BLAS accessors. 
Instead of\n * providing a, b, c, s as arguments, you should only provide a and b (the\n * inputs), and you should provide them as the first two elements of any dense\n * NMatrix type.\n *\n * The outputs [c,s] will be returned in a Ruby Array at the end; the input\n * NMatrix will also be modified in-place.\n *\n * This function, like the other cblas_ functions, does minimal type-checking.\n */\nstatic VALUE nm_atlas_cblas_rotg(VALUE self, VALUE ab) {\n  static void (*ttable[nm::NUM_DTYPES])(void* a, void* b, void* c, void* s) = {\n      NULL, NULL, NULL, NULL, NULL, // can't represent c and s as integers, so no point in having integer operations.\n      nm::math::atlas::cblas_rotg<float>,\n      nm::math::atlas::cblas_rotg<double>,\n      nm::math::atlas::cblas_rotg<nm::Complex64>,\n      nm::math::atlas::cblas_rotg<nm::Complex128>,\n      NULL //nm::math::atlas::cblas_rotg<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(ab);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qnil;\n\n  } else {\n    NM_CONSERVATIVE(nm_register_value(&self));\n    NM_CONSERVATIVE(nm_register_value(&ab));\n    void *pC = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n         *pS = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n\n    // extract A and B from the NVector (first two elements)\n    void* pA = NM_STORAGE_DENSE(ab)->elements;\n    void* pB = (char*)(NM_STORAGE_DENSE(ab)->elements) + DTYPE_SIZES[dtype];\n    // c and s are output\n\n    ttable[dtype](pA, pB, pC, pS);\n\n    VALUE result = rb_ary_new2(2);\n\n    if (dtype == nm::RUBYOBJ) {\n      rb_ary_store(result, 0, *reinterpret_cast<VALUE*>(pC));\n      rb_ary_store(result, 1, *reinterpret_cast<VALUE*>(pS));\n    } else {\n      rb_ary_store(result, 0, nm::rubyobj_from_cval(pC, dtype).rval);\n      rb_ary_store(result, 1, nm::rubyobj_from_cval(pS, dtype).rval);\n    }\n    NM_CONSERVATIVE(nm_unregister_value(&ab));\n    
NM_CONSERVATIVE(nm_unregister_value(&self));\n    return result;\n  }\n}\n\n\n/*\n * Call any of the cblas_xrot functions as directly as possible.\n *\n * xROT is a BLAS level 1 routine (taking two vectors) which applies a plane rotation.\n *\n * It's tough to find documentation on xROT. Here are what we think the arguments are for:\n *  * n     :: number of elements to consider in x and y\n *  * x     :: a vector (expects an NVector)\n *  * incx  :: stride of x\n *  * y     :: a vector (expects an NVector)\n *  * incy  :: stride of y\n *  * c     :: cosine of the angle of rotation\n *  * s     :: sine of the angle of rotation\n *\n * Note that c and s will be the same dtype as x and y, except when x and y are complex. If x and y are complex, c and s\n * will be float for Complex64 or double for Complex128.\n *\n * You probably don't want to call this function. Instead, why don't you try rot, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_cblas_rot(VALUE self, VALUE n, VALUE x, VALUE incx, VALUE y, VALUE incy, VALUE c, VALUE s) {\n  static void (*ttable[nm::NUM_DTYPES])(const int N, void*, const int, void*, const int, const void*, const void*) = {\n      NULL, NULL, NULL, NULL, NULL, // can't represent c and s as integers, so no point in having integer operations.\n      nm::math::atlas::cblas_rot<float,float>,\n      nm::math::atlas::cblas_rot<double,double>,\n      nm::math::atlas::cblas_rot<nm::Complex64,float>,\n      nm::math::atlas::cblas_rot<nm::Complex128,double>,\n      nm::math::atlas::cblas_rot<nm::RubyObject,nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(x);\n\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qfalse;\n  } else {\n    void *pC, *pS;\n\n    // We need to ensure the cosine and sine arguments are the correct dtype -- which may differ from the actual dtype.\n    if (dtype == nm::COMPLEX64) {\n      pC = NM_ALLOCA_N(float,1);\n      pS = NM_ALLOCA_N(float,1);\n      rubyval_to_cval(c, nm::FLOAT32, pC);\n      rubyval_to_cval(s, nm::FLOAT32, pS);\n    } else if (dtype == nm::COMPLEX128) {\n      pC = NM_ALLOCA_N(double,1);\n      pS = NM_ALLOCA_N(double,1);\n      rubyval_to_cval(c, nm::FLOAT64, pC);\n      rubyval_to_cval(s, nm::FLOAT64, pS);\n    } else {\n      pC = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n      pS = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n      rubyval_to_cval(c, dtype, pC);\n      rubyval_to_cval(s, dtype, pS);\n    }\n\n\n    ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), NM_STORAGE_DENSE(y)->elements, FIX2INT(incy), pC, pS);\n\n    return Qtrue;\n  }\n}\n\n\n/*\n * Call any of the cblas_xnrm2 functions as directly as possible.\n *\n * xNRM2 is a BLAS level 1 routine which calculates the 2-norm of an n-vector x.\n *\n * Arguments:\n *  * n     :: length of x, 
must be at least 0\n *  * x     :: pointer to first entry of input vector\n *  * incx  :: stride of x, must be POSITIVE (ATLAS says non-zero, but 3.8.4 code only allows positive)\n *\n * You probably don't want to call this function. Instead, why don't you try nrm2, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_cblas_nrm2(VALUE self, VALUE n, VALUE x, VALUE incx) {\n\n  static void (*ttable[nm::NUM_DTYPES])(const int N, const void* X, const int incX, void* sum) = {\n      NULL, NULL, NULL, NULL, NULL, // no help for integers\n      nm::math::atlas::cblas_nrm2<float32_t>,\n      nm::math::atlas::cblas_nrm2<float64_t>,\n      nm::math::atlas::cblas_nrm2<nm::Complex64>,\n      nm::math::atlas::cblas_nrm2<nm::Complex128>,\n      nm::math::atlas::cblas_nrm2<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype  = NM_DTYPE(x);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qnil;\n\n  } else {\n    // Determine the return dtype and allocate it\n    nm::dtype_t rdtype = dtype;\n    if      (dtype == nm::COMPLEX64)  rdtype = nm::FLOAT32;\n    else if (dtype == nm::COMPLEX128) rdtype = nm::FLOAT64;\n\n    void *Result = NM_ALLOCA_N(char, DTYPE_SIZES[rdtype]);\n\n    ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), Result);\n\n    return nm::rubyobj_from_cval(Result, rdtype).rval;\n  }\n}\n\n\n\n/*\n * Call any of the cblas_xasum functions as directly as possible.\n *\n * xASUM is a BLAS level 1 routine which calculates the sum of absolute values of the entries\n * of a vector x.\n *\n * Arguments:\n *  * n     :: length of x, must be at least 0\n *  * x     :: pointer to first entry of input vector\n *  * incx  :: stride of x, must be POSITIVE (ATLAS says non-zero, but 3.8.4 code only allows 
positive)\n *\n * You probably don't want to call this function. Instead, why don't you try asum, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_cblas_asum(VALUE self, VALUE n, VALUE x, VALUE incx) {\n\n  static void (*ttable[nm::NUM_DTYPES])(const int N, const void* X, const int incX, void* sum) = {\n      nm::math::atlas::cblas_asum<uint8_t>,\n      nm::math::atlas::cblas_asum<int8_t>,\n      nm::math::atlas::cblas_asum<int16_t>,\n      nm::math::atlas::cblas_asum<int32_t>,\n      nm::math::atlas::cblas_asum<int64_t>,\n      nm::math::atlas::cblas_asum<float32_t>,\n      nm::math::atlas::cblas_asum<float64_t>,\n      nm::math::atlas::cblas_asum<nm::Complex64>,\n      nm::math::atlas::cblas_asum<nm::Complex128>,\n      nm::math::atlas::cblas_asum<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype  = NM_DTYPE(x);\n\n  // Determine the return dtype and allocate it\n  nm::dtype_t rdtype = dtype;\n  if      (dtype == nm::COMPLEX64)  rdtype = nm::FLOAT32;\n  else if (dtype == nm::COMPLEX128) rdtype = nm::FLOAT64;\n\n  void *Result = NM_ALLOCA_N(char, DTYPE_SIZES[rdtype]);\n\n  ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), Result);\n\n  return nm::rubyobj_from_cval(Result, rdtype).rval;\n}\n\n/*\n * call-seq:\n *    NMatrix::BLAS.cblas_imax(n, vector, inc) -> Fixnum\n *\n * BLAS level 1 routine.\n *\n * Return the index of the largest element of +vector+.\n *\n * - +n+ -> Vector's size. Generally, you can use NMatrix#rows or NMatrix#cols.\n * - +vector+ -> A NMatrix of shape [n,1] or [1,n] with any dtype.\n * - +inc+ -> It's the increment used when searching. 
Use 1 except if you know\n *   what you're doing.\n */\nstatic VALUE nm_atlas_cblas_imax(VALUE self, VALUE n, VALUE x, VALUE incx) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::atlas::cblas_imax, int, const int n, const void* x, const int incx);\n\n  nm::dtype_t dtype = NM_DTYPE(x);\n\n  int index = ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx));\n\n  // Convert to Ruby's Int value.\n  return INT2FIX(index);\n}\n\n/* Call any of the cblas_xgemv functions as directly as possible.\n *\n * The cblas_xgemv functions (dgemv, sgemv, cgemv, and zgemv) define the following operation:\n *\n *    y = alpha*op(A)*x + beta*y\n *\n * where op(A) is one of <tt>op(A) = A</tt>, <tt>op(A) = A**T</tt>, or the complex conjugate of A.\n *\n * Note that this will only work for dense matrices that are of types :float32, :float64, :complex64, and :complex128.\n * Other types are not implemented in BLAS, and while they exist in NMatrix, this method is intended only to\n * expose the ultra-optimized ATLAS versions.\n *\n * == Arguments\n * See: http://www.netlib.org/blas/dgemm.f\n *\n * You probably don't want to call this function. Instead, why don't you try cblas_gemv, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_cblas_gemv(VALUE self,\n                           VALUE trans_a,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE x, VALUE incx,\n                           VALUE beta,\n                           VALUE y, VALUE incy)\n{\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::atlas::cblas_gemv, bool, const enum CBLAS_TRANSPOSE, const int, const int, const void*, const void*, const int, const void*, const int, const void*, void*, const int)\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n       *pBeta  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, pAlpha);\n  rubyval_to_cval(beta, dtype, pBeta);\n\n  return ttable[dtype](blas_transpose_sym(trans_a), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), pBeta, NM_STORAGE_DENSE(y)->elements, FIX2INT(incy)) ? Qtrue : Qfalse;\n}\n\n/* Call any of the cblas_xgemm functions as directly as possible.\n *\n * The cblas_xgemm functions (dgemm, sgemm, cgemm, and zgemm) define the following operation:\n *\n *    C = alpha*op(A)*op(B) + beta*C\n *\n * where op(X) is one of <tt>op(X) = X</tt>, <tt>op(X) = X**T</tt>, or the complex conjugate of X.\n *\n * Note that this will only work for dense matrices that are of types :float32, :float64, :complex64, and :complex128.\n * Other types are not implemented in BLAS, and while they exist in NMatrix, this method is intended only to\n * expose the ultra-optimized ATLAS versions.\n *\n * == Arguments\n * See: http://www.netlib.org/blas/dgemm.f\n *\n * You probably don't want to call this function. Instead, why don't you try gemm, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. 
Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_cblas_gemm(VALUE self,\n                           VALUE order,\n                           VALUE trans_a, VALUE trans_b,\n                           VALUE m, VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::atlas::cblas_gemm, void, const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int m, int n, int k, void* alpha, void* a, int lda, void* b, int ldb, void* beta, void* c, int ldc);\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n       *pBeta  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, pAlpha);\n  rubyval_to_cval(beta, dtype, pBeta);\n\n  ttable[dtype](blas_order_sym(order), blas_transpose_sym(trans_a), blas_transpose_sym(trans_b), FIX2INT(m), FIX2INT(n), FIX2INT(k), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb), pBeta, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n\n  return c;\n}\n\n\nstatic VALUE nm_atlas_cblas_trsm(VALUE self,\n                           VALUE order,\n                           VALUE side, VALUE uplo,\n                           VALUE trans_a, VALUE diag,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_SIDE, const enum CBLAS_UPLO,\n                                        const enum CBLAS_TRANSPOSE, const enum CBLAS_DIAG,\n                                        const int 
m, const int n, const void* alpha, const void* a,\n                                        const int lda, void* b, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::cblas_trsm<float>,\n      nm::math::atlas::cblas_trsm<double>,\n      cblas_ctrsm, cblas_ztrsm, // call directly, same function signature!\n      nm::math::atlas::cblas_trsm<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n\n    ttable[dtype](blas_order_sym(order), blas_side_sym(side), blas_uplo_sym(uplo), blas_transpose_sym(trans_a), blas_diag_sym(diag), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  return Qtrue;\n}\n\nstatic VALUE nm_atlas_cblas_trmm(VALUE self,\n                           VALUE order,\n                           VALUE side, VALUE uplo,\n                           VALUE trans_a, VALUE diag,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER,\n                                        const enum CBLAS_SIDE, const enum CBLAS_UPLO,\n                                        const enum CBLAS_TRANSPOSE, const enum CBLAS_DIAG,\n                                        const int m, const int n, const void* alpha, const void* a,\n                                        const int lda, void* b, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::cblas_trmm<float>,\n      nm::math::atlas::cblas_trmm<double>,\n      cblas_ctrmm, cblas_ztrmm, // 
call directly, same function signature!\n      NULL\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation not yet defined for non-BLAS dtypes\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n\n    ttable[dtype](blas_order_sym(order), blas_side_sym(side), blas_uplo_sym(uplo), blas_transpose_sym(trans_a), blas_diag_sym(diag), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  return b;\n}\n\nstatic VALUE nm_atlas_cblas_syrk(VALUE self,\n                           VALUE order,\n                           VALUE uplo,\n                           VALUE trans,\n                           VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_UPLO, const enum CBLAS_TRANSPOSE,\n                                        const int n, const int k, const void* alpha, const void* a,\n                                        const int lda, const void* beta, void* c, const int ldc) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::cblas_syrk<float>,\n      nm::math::atlas::cblas_syrk<double>,\n      cblas_csyrk, cblas_zsyrk, // call directly, same function signature!\n      NULL\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n         *pBeta = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n    rubyval_to_cval(beta, dtype, pBeta);\n\n    
ttable[dtype](blas_order_sym(order), blas_uplo_sym(uplo), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(k), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), pBeta, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n  }\n\n  return Qtrue;\n}\n\nstatic VALUE nm_atlas_cblas_herk(VALUE self,\n                           VALUE order,\n                           VALUE uplo,\n                           VALUE trans,\n                           VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (dtype == nm::COMPLEX64) {\n    cblas_cherk(blas_order_sym(order), blas_uplo_sym(uplo), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(k), NUM2DBL(alpha), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NUM2DBL(beta), NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n  } else if (dtype == nm::COMPLEX128) {\n    cblas_zherk(blas_order_sym(order), blas_uplo_sym(uplo), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(k), NUM2DBL(alpha), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NUM2DBL(beta), NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n  } else\n    rb_raise(rb_eNotImpError, \"this matrix operation undefined for non-complex dtypes\");\n  return Qtrue;\n}\n\n/*\n * Function signature conversion for calling CBLAS' gesvd functions as directly as possible.\n *\n * xGESVD computes the singular value decomposition (SVD) of a real\n * M-by-N matrix A, optionally computing the left and/or right singular\n * vectors. The SVD is written\n *\n *      A = U * SIGMA * transpose(V)\n *\n * where SIGMA is an M-by-N matrix which is zero except for its\n * min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and\n * V is an N-by-N orthogonal matrix.  The diagonal elements of SIGMA\n * are the singular values of A; they are real and non-negative, and\n * are returned in descending order.  
The first min(m,n) columns of\n * U and V are the left and right singular vectors of A.\n *\n * Note that the routine returns V**T, not V.\n */\nstatic VALUE nm_atlas_lapack_gesvd(VALUE self, VALUE jobu, VALUE jobvt, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt, VALUE lwork) {\n  static int (*gesvd_table[nm::NUM_DTYPES])(char, char, int, int, void* a, int, void* s, void* u, int, void* vt, int, void* work, int, void* rwork) = {\n    NULL, NULL, NULL, NULL, NULL, // no integer ops\n    nm::math::atlas::lapack_gesvd<float,float>,\n    nm::math::atlas::lapack_gesvd<double,double>,\n    nm::math::atlas::lapack_gesvd<nm::Complex64,float>,\n    nm::math::atlas::lapack_gesvd<nm::Complex128,double>,\n    NULL // no Ruby objects\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n\n  if (!gesvd_table[dtype]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    return Qfalse;\n  } else {\n    int M = FIX2INT(m),\n        N = FIX2INT(n);\n\n    int min_mn  = NM_MIN(M,N);\n    int max_mn  = NM_MAX(M,N);\n\n    char JOBU = lapack_svd_job_sym(jobu),\n         JOBVT = lapack_svd_job_sym(jobvt);\n\n    // only need rwork for complex matrices\n    int rwork_size  = (dtype == nm::COMPLEX64 || dtype == nm::COMPLEX128) ? 5 * min_mn : 0;\n    void* rwork     = rwork_size > 0 ? NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * rwork_size) : NULL;\n    int work_size   = FIX2INT(lwork);\n\n    // ignore user argument for lwork if it's too small.\n    work_size       = NM_MAX((dtype == nm::COMPLEX64 || dtype == nm::COMPLEX128 ? 
2 * min_mn + max_mn : NM_MAX(3*min_mn + max_mn, 5*min_mn)), work_size);\n    void* work      = NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * work_size);\n\n    int info = gesvd_table[dtype](JOBU, JOBVT, M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n      NM_STORAGE_DENSE(s)->elements, NM_STORAGE_DENSE(u)->elements, FIX2INT(ldu), NM_STORAGE_DENSE(vt)->elements, FIX2INT(ldvt),\n      work, work_size, rwork);\n    return INT2FIX(info);\n  }\n}\n\n/*\n * Function signature conversion for calling CBLAS' gesdd functions as directly as possible.\n *\n * xGESDD uses a divide-and-conquer strategy to compute the singular value decomposition (SVD) of a real\n * M-by-N matrix A, optionally computing the left and/or right singular\n * vectors. The SVD is written\n *\n *      A = U * SIGMA * transpose(V)\n *\n * where SIGMA is an M-by-N matrix which is zero except for its\n * min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and\n * V is an N-by-N orthogonal matrix.  The diagonal elements of SIGMA\n * are the singular values of A; they are real and non-negative, and\n * are returned in descending order.  
The first min(m,n) columns of\n * U and V are the left and right singular vectors of A.\n *\n * Note that the routine returns V**T, not V.\n */\nstatic VALUE nm_atlas_lapack_gesdd(VALUE self, VALUE jobz, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt, VALUE lwork) {\n  static int (*gesdd_table[nm::NUM_DTYPES])(char, int, int, void* a, int, void* s, void* u, int, void* vt, int, void* work, int, int* iwork, void* rwork) = {\n    NULL, NULL, NULL, NULL, NULL, // no integer ops\n    nm::math::atlas::lapack_gesdd<float,float>,\n    nm::math::atlas::lapack_gesdd<double,double>,\n    nm::math::atlas::lapack_gesdd<nm::Complex64,float>,\n    nm::math::atlas::lapack_gesdd<nm::Complex128,double>,\n    NULL // no Ruby objects\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!gesdd_table[dtype]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    return Qfalse;\n  } else {\n    int M = FIX2INT(m),\n        N = FIX2INT(n);\n\n    int min_mn  = NM_MIN(M,N);\n    int max_mn  = NM_MAX(M,N);\n\n    char JOBZ = lapack_svd_job_sym(jobz);\n\n    // only need rwork for complex matrices\n    void* rwork = NULL;\n\n    int work_size = FIX2INT(lwork); // Make sure we allocate enough work, regardless of the user request.\n    if (dtype == nm::COMPLEX64 || dtype == nm::COMPLEX128) {\n      int rwork_size = min_mn * (JOBZ == 'N' ? 
5 : NM_MAX(5*min_mn + 7, 2*max_mn + 2*min_mn + 1));\n      rwork = NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * rwork_size);\n\n      if (JOBZ == 'N')      work_size = NM_MAX(work_size, 3*min_mn + NM_MAX(max_mn, 6*min_mn));\n      else if (JOBZ == 'O') work_size = NM_MAX(work_size, 3*min_mn*min_mn + NM_MAX(max_mn, 5*min_mn*min_mn + 4*min_mn));\n      else                  work_size = NM_MAX(work_size, 3*min_mn*min_mn + NM_MAX(max_mn, 4*min_mn*min_mn + 4*min_mn));\n    } else {\n      if (JOBZ == 'N')      work_size = NM_MAX(work_size, 2*min_mn + max_mn);\n      else if (JOBZ == 'O') work_size = NM_MAX(work_size, 2*min_mn*min_mn + max_mn + 2*min_mn);\n      else                  work_size = NM_MAX(work_size, min_mn*min_mn + max_mn + 2*min_mn);\n    }\n    void* work  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * work_size);\n    int* iwork  = NM_ALLOCA_N(int, 8*min_mn);\n\n    int info = gesdd_table[dtype](JOBZ, M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n      NM_STORAGE_DENSE(s)->elements, NM_STORAGE_DENSE(u)->elements, FIX2INT(ldu), NM_STORAGE_DENSE(vt)->elements, FIX2INT(ldvt),\n      work, work_size, iwork, rwork);\n    return INT2FIX(info);\n  }\n}\n\n/*\n * Function signature conversion for calling CBLAS' geev functions as directly as possible.\n *\n * GEEV computes for an N-by-N real nonsymmetric matrix A, the\n * eigenvalues and, optionally, the left and/or right eigenvectors.\n *\n * The right eigenvector v(j) of A satisfies\n *                    A * v(j) = lambda(j) * v(j)\n * where lambda(j) is its eigenvalue.\n *\n * The left eigenvector u(j) of A satisfies\n *                 u(j)**H * A = lambda(j) * u(j)**H\n * where u(j)**H denotes the conjugate transpose of u(j).\n *\n * The computed eigenvectors are normalized to have Euclidean norm\n * equal to 1 and largest component real.\n */\nstatic VALUE nm_atlas_lapack_geev(VALUE self, VALUE compute_left, VALUE compute_right, VALUE n, VALUE a, VALUE lda, VALUE w, VALUE wi, VALUE vl, VALUE ldvl, VALUE vr, 
VALUE ldvr, VALUE lwork) {\n  static int (*geev_table[nm::NUM_DTYPES])(char, char, int, void* a, int, void* w, void* wi, void* vl, int, void* vr, int, void* work, int, void* rwork) = {\n    NULL, NULL, NULL, NULL, NULL, // no integer ops\n    nm::math::atlas::lapack_geev<float,float>,\n    nm::math::atlas::lapack_geev<double,double>,\n    nm::math::atlas::lapack_geev<nm::Complex64,float>,\n    nm::math::atlas::lapack_geev<nm::Complex128,double>,\n    NULL // no Ruby objects\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n\n  if (!geev_table[dtype]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    return Qfalse;\n  } else {\n    int N = FIX2INT(n);\n\n    char JOBVL = lapack_evd_job_sym(compute_left),\n         JOBVR = lapack_evd_job_sym(compute_right);\n\n    void* A  = NM_STORAGE_DENSE(a)->elements;\n    void* WR = NM_STORAGE_DENSE(w)->elements;\n    void* WI = wi == Qnil ? NULL : NM_STORAGE_DENSE(wi)->elements;\n    void* VL = JOBVL == 'V' ? NM_STORAGE_DENSE(vl)->elements : NULL;\n    void* VR = JOBVR == 'V' ? NM_STORAGE_DENSE(vr)->elements : NULL;\n\n    // only need rwork for complex matrices (wi == Qnil for complex)\n    int rwork_size  = dtype == nm::COMPLEX64 || dtype == nm::COMPLEX128 ? N * DTYPE_SIZES[dtype] : 0; // 2*N*floattype for complex only, otherwise 0\n    void* rwork     = rwork_size > 0 ? NM_ALLOCA_N(char, rwork_size) : NULL;\n    int work_size   = FIX2INT(lwork);\n    void* work;\n\n    int info;\n\n    // if work size is 0 or -1, query.\n    if (work_size <= 0) {\n      work_size = -1;\n      work = NM_ALLOC_N(char, DTYPE_SIZES[dtype]); //2*N * DTYPE_SIZES[dtype]);\n      info = geev_table[dtype](JOBVL, JOBVR, N, A, FIX2INT(lda), WR, WI, VL, FIX2INT(ldvl), VR, FIX2INT(ldvr), work, work_size, rwork);\n      work_size = (int)(dtype == nm::COMPLEX64 || dtype == nm::FLOAT32 ? 
reinterpret_cast<float*>(work)[0] : reinterpret_cast<double*>(work)[0]);\n      // line above is basically: work_size = (int)(work[0]); // now have new work_size\n      NM_FREE(work);\n      if (info == 0)\n        rb_warn(\"geev: calculated optimal lwork of %d; to eliminate this message, use a positive value for lwork (at least 2*shape[i])\", work_size);\n      else return INT2FIX(info); // error of some kind on query!\n    }\n\n    // if work size is < 2*N, just set it to 2*N\n    if (work_size < 2*N) work_size = 2*N;\n    if (work_size < 3*N && (dtype == nm::FLOAT32 || dtype == nm::FLOAT64)) {\n      work_size = JOBVL == 'V' || JOBVR == 'V' ? 4*N : 3*N;\n    }\n\n    // Allocate work array for actual run\n    work = NM_ALLOCA_N(char, work_size * DTYPE_SIZES[dtype]);\n\n    // Perform the actual calculation.\n    info = geev_table[dtype](JOBVL, JOBVR, N, A, FIX2INT(lda), WR, WI, VL, FIX2INT(ldvl), VR, FIX2INT(ldvr), work, work_size, rwork);\n\n    return INT2FIX(info);\n  }\n}\n\n/* Call any of the clapack_xgetrf functions as directly as possible.\n *\n * The clapack_getrf functions (dgetrf, sgetrf, cgetrf, and zgetrf) compute an LU factorization of a general M-by-N\n * matrix A using partial pivoting with row interchanges.\n *\n * The factorization has the form:\n *    A = P * L * U\n * where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n),\n * and U is upper triangular (upper trapezoidal if m < n).\n *\n * This is the right-looking level 3 BLAS version of the algorithm.\n *\n * == Arguments\n * See: http://www.netlib.org/lapack/double/dgetrf.f\n * (You don't need argument 5; this is the value returned by this function.)\n *\n * You probably don't want to call this function. Instead, why don't you try clapack_getrf, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n *\n * Returns an array giving the pivot indices (normally these are argument #5).\n */\nstatic VALUE nm_atlas_clapack_getrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const int m, const int n, void* a, const int lda, int* ipiv) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::clapack_getrf<float>,\n      nm::math::atlas::clapack_getrf<double>,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\n      clapack_cgetrf, clapack_zgetrf, // call directly, same function signature!\n#else // Especially important for Mac OS, which doesn't seem to include the ATLAS clapack interface.\n      nm::math::atlas::clapack_getrf<nm::Complex64>,\n      nm::math::atlas::clapack_getrf<nm::Complex128>,\n#endif\n      nm::math::atlas::clapack_getrf<nm::RubyObject>\n  };\n\n  int M = FIX2INT(m),\n      N = FIX2INT(n);\n\n  // Allocate the pivot index array, which is of size MIN(M, N).\n  size_t ipiv_size = std::min(M,N);\n  int* ipiv = NM_ALLOCA_N(int, ipiv_size);\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    // Call either our version of getrf or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), ipiv);\n  }\n\n  // Result will be stored in a. We return ipiv as an array.\n  VALUE ipiv_array = rb_ary_new2(ipiv_size);\n  for (size_t i = 0; i < ipiv_size; ++i) {\n    rb_ary_store(ipiv_array, i, INT2FIX(ipiv[i]));\n  }\n\n  return ipiv_array;\n}\n\n\n/* Call any of the clapack_xpotrf functions as directly as possible.\n *\n * You probably don't want to call this function. Instead, why don't you try clapack_potrf, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. 
Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_clapack_potrf(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda) {\n#if !defined(HAVE_CLAPACK_H) && !defined(HAVE_ATLAS_CLAPACK_H)\n  rb_raise(rb_eNotImpError, \"potrf currently requires CLAPACK\");\n#endif\n\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_UPLO, const int n, void* a, const int lda) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::clapack_potrf<float>,\n      nm::math::atlas::clapack_potrf<double>,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\n      clapack_cpotrf, clapack_zpotrf, // call directly, same function signature!\n#else // Especially important for Mac OS, which doesn't seem to include the ATLAS clapack interface.\n      nm::math::atlas::clapack_potrf<nm::Complex64>,\n      nm::math::atlas::clapack_potrf<nm::Complex128>,\n#endif\n      NULL\n  };\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    // FIXME: Once BLAS dtypes are implemented, replace error above with the error below.\n    //rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    // Call either our version of potrf or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), blas_uplo_sym(uplo), FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda));\n  }\n\n  return a;\n}\n\n\n/*\n * Call any of the clapack_xgetrs functions as directly as possible.\n */\nstatic VALUE nm_atlas_clapack_getrs(VALUE self, VALUE order, VALUE trans, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE ipiv, VALUE b, VALUE ldb) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N,\n                                       const int NRHS, const void* 
A, const int lda, const int* ipiv, void* B,\n                                       const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::clapack_getrs<float>,\n      nm::math::atlas::clapack_getrs<double>,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\n      clapack_cgetrs, clapack_zgetrs, // call directly, same function signature!\n#else // Especially important for Mac OS, which doesn't seem to include the ATLAS clapack interface.\n      nm::math::atlas::clapack_getrs<nm::Complex64>,\n      nm::math::atlas::clapack_getrs<nm::Complex128>,\n#endif\n      nm::math::atlas::clapack_getrs<nm::RubyObject>\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (TYPE(ipiv) != T_ARRAY) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n\n    // Call either our version of getrs or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(nrhs), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n                        ipiv_, NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  // b is both returned and modified directly in the argument list.\n  return b;\n}\n\n\n/*\n * Call any of the clapack_xpotrs functions as directly as possible.\n */\nstatic VALUE nm_atlas_clapack_potrs(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE b, VALUE ldb) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N,\n                                       const int NRHS, const void* A, const int lda, void* 
B, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::clapack_potrs<float>,\n      nm::math::atlas::clapack_potrs<double>,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\n      clapack_cpotrs, clapack_zpotrs, // call directly, same function signature!\n#else // Especially important for Mac OS, which doesn't seem to include the ATLAS clapack interface.\n      nm::math::atlas::clapack_potrs<nm::Complex64>,\n      nm::math::atlas::clapack_potrs<nm::Complex128>,\n#endif\n      nm::math::atlas::clapack_potrs<nm::RubyObject>\n  };\n\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n\n    // Call either our version of potrs or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), blas_uplo_sym(uplo), FIX2INT(n), FIX2INT(nrhs), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n                        NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  // b is both returned and modified directly in the argument list.\n  return b;\n}\n\n/* Call any of the clapack_xgetri functions as directly as possible.\n *\n * You probably don't want to call this function. Instead, why don't you try clapack_getri, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n *\n * Returns an array giving the pivot indices (normally these are argument #5).\n */\nstatic VALUE nm_atlas_clapack_getri(VALUE self, VALUE order, VALUE n, VALUE a, VALUE lda, VALUE ipiv) {\n#if !defined (HAVE_CLAPACK_H) && !defined (HAVE_ATLAS_CLAPACK_H)\n  rb_raise(rb_eNotImpError, \"getri currently requires CLAPACK\");\n#endif\n\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const int n, void* a, const int lda, const int* ipiv) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::clapack_getri<float>,\n      nm::math::atlas::clapack_getri<double>,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\n      clapack_cgetri, clapack_zgetri, // call directly, same function signature!\n#else // Especially important for Mac OS, which doesn't seem to include the ATLAS clapack interface.\n      nm::math::atlas::clapack_getri<nm::Complex64>,\n      nm::math::atlas::clapack_getri<nm::Complex128>,\n#endif\n      NULL\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (TYPE(ipiv) != T_ARRAY) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    // FIXME: Once non-BLAS dtypes are implemented, replace error above with the error below.\n    //rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    // Call either our version of getri or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), ipiv_);\n  }\n\n  return a;\n}\n\n\n/* Call any of the 
clapack_xpotri functions as directly as possible.\n *\n * You probably don't want to call this function. Instead, why don't you try clapack_potri, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_atlas_clapack_potri(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda) {\n#if !defined (HAVE_CLAPACK_H) && !defined (HAVE_ATLAS_CLAPACK_H)\n  rb_raise(rb_eNotImpError, \"getri currently requires CLAPACK\");\n#endif\n\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_UPLO, const int n, void* a, const int lda) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::atlas::clapack_potri<float>,\n      nm::math::atlas::clapack_potri<double>,\n#if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)\n      clapack_cpotri, clapack_zpotri, // call directly, same function signature!\n#else // Especially important for Mac OS, which doesn't seem to include the ATLAS clapack interface.\n      nm::math::atlas::clapack_potri<nm::Complex64>,\n      nm::math::atlas::clapack_potri<nm::Complex128>,\n#endif\n      NULL\n  };\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    // FIXME: Once BLAS dtypes are implemented, replace error above with the error below.\n    //rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    // Call either our version of getri or the LAPACK version.\n    ttable[NM_DTYPE(a)](blas_order_sym(order), blas_uplo_sym(uplo), FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda));\n  }\n\n  return a;\n}\n\n\n/*\n * Call any of the clapack_xlaswp functions as directly as possible.\n *\n * Note that LAPACK's xlaswp functions accept a column-order matrix, but NMatrix uses 
row-order. Thus, n should be the\n * number of rows and lda should be the number of columns, no matter what it says in the documentation for dlaswp.f.\n */\nstatic VALUE nm_atlas_clapack_laswp(VALUE self, VALUE n, VALUE a, VALUE lda, VALUE k1, VALUE k2, VALUE ipiv, VALUE incx) {\n  //We have actually never used the ATLAS version of laswp. For the time being\n  //I will leave it like that and just always call the internal implementation.\n  //I don't know if there is a good reason for this or not.\n  //Maybe because our internal version swaps columns instead of rows.\n  static void (*ttable[nm::NUM_DTYPES])(const int n, void* a, const int lda, const int k1, const int k2, const int* ipiv, const int incx) = {\n      nm::math::clapack_laswp<uint8_t>,\n      nm::math::clapack_laswp<int8_t>,\n      nm::math::clapack_laswp<int16_t>,\n      nm::math::clapack_laswp<int32_t>,\n      nm::math::clapack_laswp<int64_t>,\n      nm::math::clapack_laswp<float>,\n      nm::math::clapack_laswp<double>,\n      nm::math::clapack_laswp<nm::Complex64>,\n      nm::math::clapack_laswp<nm::Complex128>,\n      nm::math::clapack_laswp<nm::RubyObject>\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (TYPE(ipiv) != T_ARRAY) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  // Call either our version of laswp or the LAPACK version.\n  ttable[NM_DTYPE(a)](FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), FIX2INT(k1), FIX2INT(k2), ipiv_, FIX2INT(incx));\n\n  // a is both returned and modified directly in the argument list.\n  return a;\n}\n\n\n}\n"
  },
  {
    "path": "ext/nmatrix_atlas/nmatrix_atlas.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nmatrix_atlas.cpp\n//\n// Main file for nmatrix_atlas extension\n//\n\n#include <ruby.h>\n\n#include \"nmatrix.h\"\n\n#include \"math_atlas/inc.h\"\n\n#include \"data/data.h\"\n\nextern \"C\" {\nvoid nm_math_init_atlas(); \n\nvoid Init_nmatrix_atlas() {\n  nm_math_init_atlas();\n}\n\n}\n"
  },
  {
    "path": "ext/nmatrix_fftw/extconf.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == nmatrix_fftw/extconf.rb\n#\n# This file checks FFTW3 and other necessary headers/shared objects.\n\nrequire 'nmatrix/mkmf'\n\nfftw_libdir = RbConfig::CONFIG['libdir']\nfftw_incdir = RbConfig::CONFIG['includedir']\nfftw_srcdir = RbConfig::CONFIG['srcdir']\n\n$CFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix -I$(srcdir)/lapacke/include\",$CFLAGS].join(\" \")\n$CXXFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix -I$(srcdir)/lapacke/include -std=c++11\",$CXXFLAGS].join(\" \")\n$CPPFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix -I$(srcdir)/lapacke/include -std=c++11\",$CPPFLAGS].join(\" \")\n\nflags = \" --include=#{fftw_incdir} --libdir=#{fftw_libdir}\"\n\nif have_library(\"fftw3\")\n  $CFLAGS += [\" -lfftw3 -lm #{$CFLAGS} #{$flags}\"].join(\" \")\n  dir_config('nmatrix_fftw', fftw_incdir, fftw_libdir)\n  dir_config('nmatrix_fftw')\nend\n\ncreate_conf_h(\"nmatrix_fftw_config.h\")\ncreate_makefile(\"nmatrix_fftw\")\n\n# to clean up object files in subdirectories:\nopen('Makefile', 'a') do |f|\n  clean_objs_paths = %w{ }.map { |d| \"#{d}/*.#{CONFIG[\"OBJEXT\"]}\" }\n  f.write(\"CLEANOBJS := $(CLEANOBJS) #{clean_objs_paths.join(' ')}\")\nend\n"
  },
  {
    "path": "ext/nmatrix_fftw/nmatrix_fftw.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nmatrix_fftw.cpp\n//\n// Main file for nmatrix_fftw extension\n//\n\n#include <ruby.h>\n#include <complex.h>\n#include <fftw3.h>\n#include \"storage/common.h\"\n#include \"nmatrix.h\"\n#include <iostream>\n\n#define TYPE_COMPLEX_COMPLEX 0\n#define TYPE_REAL_COMPLEX    1\n#define TYPE_COMPLEX_REAL    2\n#define TYPE_REAL_REAL       3\n\n// @private Used internally by the C API.\nstatic VALUE cNMatrix_FFTW_Plan_Data;\n\n// @private Used internally by the C API.\n//\n// ADT for encapsulating various data structures required for sucessfully planning\n//   and executing a fourier transform with FFTW. 
Uses void* pointers because \n//   input/output can be either double or fftw_complex depending on the type of\n//   FFT being planned.\nstruct fftw_data {\n  void* input; \n  void* output;\n  fftw_plan plan;\n};\n\n// @private Used internally by the C API.\n// Method used by Ruby GC for freeing memory allocated by FFTW.\nstatic void nm_fftw_cleanup(fftw_data* d)\n{\n  xfree(d->input);\n  xfree(d->output);\n  fftw_destroy_plan(d->plan);\n  xfree(d);\n}\n\n// @private Used internally by the C API.\n// Used for converting a Ruby Array containing the shape to a C++ array of ints.\nstatic int* nm_fftw_interpret_shape(VALUE rb_shape, const int dimensions)\n{\n  Check_Type(rb_shape, T_ARRAY);\n\n  int *shape = new int[dimensions];\n  const VALUE *arr = RARRAY_CONST_PTR(rb_shape);\n\n  for (int i = 0; i < dimensions; ++i) {\n    shape[i] = FIX2INT(arr[i]);\n  }\n\n  return shape;\n}\n\n// @private Used internally by the C API.\n// Convert values passed in Ruby Array containing kinds of real-real transforms \n//   to a C array of ints. \nstatic void\nnm_fftw_interpret_real_real_kind(VALUE real_real_kind, int *r2r_kinds)\n{\n  int size = RARRAY_LEN(real_real_kind);\n  const VALUE *a = RARRAY_CONST_PTR(real_real_kind);\n  for (int i = 0; i < size; ++i) { \n    r2r_kinds[i] = FIX2INT(a[i]); \n  }\n}\n\n// @private Used internally by the C API.\n// Actually calls the FFTW planner routines based on the input/output and the\n//   type of routine selected. 
Also allocates memory for input and output pointers.\nstatic void nm_fftw_actually_create_plan(fftw_data* data, \n  size_t size, const int dimensions, const int* shape, int sign, unsigned flags, \n  VALUE rb_type, VALUE real_real_kind)\n{\n  switch (FIX2INT(rb_type))\n  {\n    case TYPE_COMPLEX_COMPLEX:\n      data->input  = ALLOC_N(fftw_complex, size);\n      data->output = ALLOC_N(fftw_complex, size);\n      data->plan   = fftw_plan_dft(dimensions, shape, (fftw_complex*)data->input, \n        (fftw_complex*)data->output, sign, flags);\n      break;\n    case TYPE_REAL_COMPLEX:\n      data->input  = ALLOC_N(double      , size);\n      data->output = ALLOC_N(fftw_complex, size);\n      data->plan   = fftw_plan_dft_r2c(dimensions, shape, (double*)data->input, \n        (fftw_complex*)data->output, flags);\n      break;\n    case TYPE_COMPLEX_REAL:\n      data->input  = ALLOC_N(fftw_complex,  size);\n      data->output = ALLOC_N(double      ,  size);\n      data->plan   = fftw_plan_dft_c2r(dimensions, shape, (fftw_complex*)data->input, \n        (double*)data->output, flags);\n      break;\n    case TYPE_REAL_REAL:\n      int* r2r_kinds = ALLOC_N(int, FIX2INT(real_real_kind));\n      nm_fftw_interpret_real_real_kind(real_real_kind, r2r_kinds);\n      data->input  = ALLOC_N(double, size);\n      data->output = ALLOC_N(double, size);\n      data->plan   = fftw_plan_r2r(dimensions, shape, (double*)data->input, \n        (double*)data->output, (fftw_r2r_kind*)r2r_kinds, flags);\n      xfree(r2r_kinds);\n      break;\n  }\n}\n\n/** \\brief Create a plan for performing the fourier transform based on input,\n * output pointers and the underlying hardware.\n *\n * @param[in] self          Object on which the function is called\n * @param[in] rb_shape      Shape of the plan.\n * @param[in] rb_size       Size of the plan.\n * @param[in] rb_dim        Dimension of the FFT to be performed.\n * @param[in] rb_flags      Number denoting the planner flags.\n * @param[in] 
rb_direction  Direction of FFT (can be -1 or +1). Specifies the\n *   sign of the exponent.\n * @param[in] rb_type       Number specifying the type of FFT being planned (one\n *    of :complex_complex, :complex_real, :real_complex and :real_real)\n * @param[in] rb_real_real_kind    Ruby Array specifying the kind of DFT to perform over\n *   each axis in case of a real input/real output FFT.\n *\n * \\returns An object of type NMatrix::FFTW::Plan::Data that encapsulates the\n * plan and relevant input/output arrays.\n */\nstatic VALUE nm_fftw_create_plan(VALUE self, VALUE rb_shape, VALUE rb_size,\n  VALUE rb_dim, VALUE rb_flags, VALUE rb_direction, VALUE rb_type, VALUE rb_real_real_kind)\n{ \n  const int dimensions = FIX2INT(rb_dim);\n  const int* shape     = nm_fftw_interpret_shape(rb_shape, dimensions);\n  size_t size          = FIX2INT(rb_size);\n  int sign             = FIX2INT(rb_direction);\n  unsigned flags       = FIX2INT(rb_flags);\n  fftw_data *data      = ALLOC(fftw_data);\n\n  nm_fftw_actually_create_plan(data, size, dimensions, shape, \n    sign, flags, rb_type, rb_real_real_kind);\n  \n  return Data_Wrap_Struct(cNMatrix_FFTW_Plan_Data, NULL, nm_fftw_cleanup, data);\n}\n\n// @private Used internally by the C API.\ntemplate <typename InputType>\nstatic void nm_fftw_actually_set(VALUE nmatrix, VALUE plan_data)\n{\n  fftw_data* data;\n  Data_Get_Struct(plan_data, fftw_data, data);\n  memcpy((InputType*)data->input, (InputType*)NM_DENSE_ELEMENTS(nmatrix), \n    sizeof(InputType)*NM_DENSE_COUNT(nmatrix));\n}\n\n/** \\brief Here is a brief description of what this function does.\n *\n * @param[in,out] self       Object on which the function is called.\n * @param[in]     plan_data  An internal data structure of type \n *   NMatrix::FFTW::Plan::Data that is created by Data_Wrap_Struct in \n *   nm_fftw_create_plan and which encapsulates the FFTW plan in a Ruby object.\n * @param[in]     nmatrix    An NMatrix object (pre-allocated) which contains the\n *   input 
elements for the fourier transform.\n * @param[in]     type       A number representing the type of fourier transform \n *   being performed. (:complex_complex, :real_complex, :complex_real or :real_real).\n *\n * \\returns self\n */\nstatic VALUE nm_fftw_set_input(VALUE self, VALUE nmatrix, VALUE plan_data, \n  VALUE type)\n{\n  switch(FIX2INT(type))\n  {\n    case TYPE_COMPLEX_COMPLEX:\n    case TYPE_COMPLEX_REAL:\n      nm_fftw_actually_set<fftw_complex>(nmatrix, plan_data);\n      break;\n    case TYPE_REAL_COMPLEX:\n    case TYPE_REAL_REAL:\n      nm_fftw_actually_set<double>(nmatrix, plan_data);\n      break;\n    default:\n      rb_raise(rb_eArgError, \"Invalid type of DFT.\");\n  }\n\n  return self;\n}\n\n// @private Used internally by the C API.\n// Call fftw_execute and copy the resulting data into the nmatrix object.\ntemplate <typename OutputType>\nstatic void nm_fftw_actually_execute(VALUE nmatrix, VALUE plan_data)\n{\n  fftw_data *data;\n  Data_Get_Struct(plan_data, fftw_data, data);\n  fftw_execute(data->plan);\n  memcpy((OutputType*)NM_DENSE_ELEMENTS(nmatrix), (OutputType*)data->output, \n    sizeof(OutputType)*NM_DENSE_COUNT(nmatrix));\n}\n\n/** \\brief Executes the fourier transform by calling the fftw_execute function \n * and copies the output to the output nmatrix object, which can be accessed from\n * Ruby.\n *\n * @param[in] self       Object on which the function is called.\n * @param[in] plan_data  An internal data structure of type \n *   NMatrix::FFTW::Plan::Data that is created by Data_Wrap_Struct in \n *   nm_fftw_create_plan and which encapsulates the FFTW plan in a Ruby object.\n * @param[in] nmatrix    An NMatrix object (pre-allocated) into which the computed\n *   data will be copied.\n * @param[in] type       A number representing the type of fourier transform being\n *   performed. 
(:complex_complex, :real_complex, :complex_real or :real_real).\n *\n * \\returns TrueClass if computation completed without errors.\n */\nstatic VALUE nm_fftw_execute(VALUE self, VALUE nmatrix, VALUE plan_data, VALUE type)\n{\n  switch(FIX2INT(type))\n  {\n    case TYPE_COMPLEX_COMPLEX:\n    case TYPE_REAL_COMPLEX:\n      nm_fftw_actually_execute<fftw_complex>(nmatrix, plan_data);\n      break;\n    case TYPE_COMPLEX_REAL:\n    case TYPE_REAL_REAL:\n      nm_fftw_actually_execute<double>(nmatrix, plan_data);\n      break;\n    default:\n      rb_raise(rb_eTypeError, \"Invalid type of DFT.\");\n  }\n\n  return Qtrue;\n}\n\nextern \"C\" {\n  void Init_nmatrix_fftw() \n  {\n    VALUE cNMatrix                = rb_define_class(\"NMatrix\", rb_cObject);\n    VALUE cNMatrix_FFTW           = rb_define_module_under(cNMatrix, \"FFTW\");\n    VALUE cNMatrix_FFTW_Plan      = rb_define_class_under(cNMatrix_FFTW, \"Plan\", \n      rb_cObject);\n    VALUE cNMatrix_FFTW_Plan_Data = rb_define_class_under(\n      cNMatrix_FFTW_Plan, \"Data\", rb_cObject);\n\n    rb_define_private_method(cNMatrix_FFTW_Plan, \"c_create_plan\", \n      (METHOD)nm_fftw_create_plan, 7);\n    rb_define_private_method(cNMatrix_FFTW_Plan, \"c_set_input\",\n      (METHOD)nm_fftw_set_input, 3);\n    rb_define_private_method(cNMatrix_FFTW_Plan, \"c_execute\",\n      (METHOD)nm_fftw_execute, 3);\n  }\n}\n"
  },
  {
    "path": "ext/nmatrix_java/nmatrix/math/MathHelper.java",
    "content": "import org.apache.commons.math3.util.FastMath;\nimport org.apache.commons.math3.special.Erf;\nimport org.apache.commons.math3.special.Gamma;\n\npublic class MathHelper{\n\n  public static double[] log(double base, double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = FastMath.log(base, arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] erf(double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = Erf.erf(arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] erfc(double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = Erf.erfc(arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] gamma(double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = Gamma.gamma(arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] round(double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = Math.round(arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] ldexp(double[] arr1, double[] arr){\n    double[] result = new double[arr1.length];\n    for(int i = 0; i< arr1.length; i++){\n      result[i] = arr1[i] * Math.pow(2, arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] ldexpScalar(double val, double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = val * Math.pow(2, arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] ldexpScalar2(double val, double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] = arr[i] * Math.pow(2, val);\n    } \n    return result;\n  }\n\n  public static double[] hypot(double[] arr1, 
double[] arr2){\n    double[] result = new double[arr1.length];\n    for(int i = 0; i< arr1.length; i++){\n      result[i] =  Math.sqrt(arr2[i] * arr2[i] + arr1[i] * arr1[i]);\n    } \n    return result;\n  }\n\n  public static double[] hypotScalar(double val, double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] =  Math.sqrt(arr[i] * arr[i] + val * val);\n    } \n    return result;\n  }\n\n  public static double[] atan2(double[] arr1, double[] arr2){\n    double[] result = new double[arr1.length];\n    for(int i = 0; i< arr1.length; i++){\n      result[i] =  Math.atan2(arr2[i], arr1[i]);\n    } \n    return result;\n  }\n\n  public static double[] atan2Scalar(double val, double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] =  Math.atan2(val, arr[i]);\n    } \n    return result;\n  }\n\n  public static double[] atan2Scalar2(double val, double[] arr){\n    double[] result = new double[arr.length];\n    for(int i = 0; i< arr.length; i++){\n      result[i] =  Math.atan2(arr[i], val);\n    } \n    return result;\n  }\n\n}"
  },
  {
    "path": "ext/nmatrix_java/nmatrix/storage/dense/ArrayComparator.java",
    "content": "public class ArrayComparator{\n\n  public static boolean equals(double[] arr1, double[] arr2){\n\n    double delta = 1e-3;\n    \n    for(int i=0; i < arr1.length; i++){\n      if(Math.abs(arr1[i] - arr2[i]) > delta){\n        return false;\n      }\n    }\n    \n    return true;\n  \n  }\n}"
  },
  {
    "path": "ext/nmatrix_java/nmatrix/util/ArrayGenerator.java",
    "content": "public class ArrayGenerator{\n  // Array from Matrix begin\n\n  public static double[] getArrayDouble(double[][] matrix, int row, int col)\n  {\n    double[] array = new double[row * col];\n    for (int index=0, i=0; i < row ; i++){\n        for (int j=0; j < col; j++){\n            array[index] = matrix[i][j];\n            index++;\n        }\n    }\n\n    return array;\n  }\n\n  public static float[] getArrayFloat(float[][] matrix, int row, int col)\n  {\n    float[] array = new float[row * col];\n    for (int index=0, i=0; i < row ; i++){\n        for (int j=0; j < col; j++){\n            array[index] = matrix[i][j];\n            index++;\n        }\n    }\n\n    return array;\n  }\n\n  public static double[] getArrayColMajorDouble(double[][] matrix, int col, int row)\n  {\n    double[] array = new double[row * col];\n    for (int index=0, i=0; i < col ; i++){\n        for (int j=0; j < row; j++){\n            array[index] = matrix[i][j];\n            index++;\n        }\n    }\n\n    return array;\n  }\n\n  public static float[] getArrayColMajorFloat(float[][] matrix, int col, int row)\n  {\n    float[] array = new float[row * col];\n    for (int index=0, i=0; i < col ; i++){\n        for (int j=0; j < row; j++){\n            array[index] = matrix[i][j];\n            index++;\n        }\n    }\n\n    return array;\n  }\n\n  public static float[] getArrayFloatFromDouble(double[][] matrix, int row, int col)\n  {\n    float[] array = new float[row * col];\n    for (int index=0, i=0; i < row ; i++){\n        for (int j=0; j < col; j++){\n            array[index] = (float)matrix[i][j];\n            index++;\n        }\n    }\n\n    return array;\n  }\n\n\n\n  // Array from Matrix end\n\n  // typeCast beging\n\n  public static float[] convertArrayFloatFromDouble(double[] array){\n    float[] resultArray = new float[array.length];\n    for (int i=0; i < array.length ; i++){\n      array[i] = (float)array[i];\n    }\n\n    return resultArray;\n  }\n\n  
// typeCast end\n}"
  },
  {
    "path": "ext/nmatrix_java/nmatrix/util/MatrixGenerator.java",
    "content": "public class MatrixGenerator\n{ \n\n  // Matrix from Array begin\n  public static float[][] getMatrixFloat(float[] array, int row, int col)\n  {\n    float[][] matrix = new float[row][col];\n    for (int index=0, i=0; i < row ; i++){\n        for (int j=0; j < col; j++){\n            matrix[i][j]= array[index];\n            index++;\n        }\n    }\n\n    return matrix;\n     \n  }\n\n  public static double[][] getMatrixDouble(double[] array, int row, int col)\n  {\n    double[][] matrix = new double[row][col];\n    for (int index=0, i=0; i < row ; i++){\n        for (int j=0; j < col; j++){\n            matrix[i][j]= array[index];\n            index++;\n        }\n    }\n\n    return matrix;\n     \n  }\n\n  public static float[][] getMatrixColMajorFloat(float[] array, int col, int row)\n  {\n    float[][] matrix = new float[col][row];\n    for (int index=0, i=0; i < col ; i++){\n        for (int j=0; j < row; j++){\n            matrix[i][j]= array[index];\n            index++;\n        }\n    }\n\n    return matrix;\n     \n  }\n\n  // Matrix from Array end\n\n\n}"
  },
  {
    "path": "ext/nmatrix_java/nmatrix/util/WrapperType.java",
    "content": "//http://stackoverflow.com/questions/709961/determining-if-an-object-is-of-primitive-type\n\nimport java.util.*;\n\npublic class WrapperType\n{\n  // How-to-use?\n  // public static void main(String[] args)\n  // {        Object o = 1;\n  //     System.out.println(isWrapperType(String.class));\n  //     System.out.println(isWrapperType(o.getClass()));\n  // }\n\n  private static final Set<Class<?>> WRAPPER_TYPES = getWrapperTypes();\n\n  public static boolean isWrapperType(Class<?> clazz)\n  {\n    return WRAPPER_TYPES.contains(clazz);\n  }\n\n  private static Set<Class<?>> getWrapperTypes()\n  {\n    Set<Class<?>> ret = new HashSet<Class<?>>();\n    ret.add(Boolean.class);\n    ret.add(Character.class);\n    ret.add(Byte.class);\n    ret.add(Short.class);\n    ret.add(Integer.class);\n    ret.add(Long.class);\n    ret.add(Float.class);\n    ret.add(Double.class);\n    ret.add(Void.class);\n    return ret;\n  }\n}"
  },
  {
    "path": "ext/nmatrix_java/test/AssertTests.java",
    "content": "import static org.hamcrest.CoreMatchers.allOf;\nimport static org.hamcrest.CoreMatchers.anyOf;\nimport static org.hamcrest.CoreMatchers.both;\nimport static org.hamcrest.CoreMatchers.containsString;\nimport static org.hamcrest.CoreMatchers.equalTo;\nimport static org.hamcrest.CoreMatchers.everyItem;\nimport static org.hamcrest.CoreMatchers.hasItems;\nimport static org.hamcrest.CoreMatchers.not;\nimport static org.hamcrest.CoreMatchers.sameInstance;\nimport static org.hamcrest.CoreMatchers.startsWith;\nimport static org.junit.Assert.assertArrayEquals;\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertFalse;\nimport static org.junit.Assert.assertNotNull;\nimport static org.junit.Assert.assertNotSame;\nimport static org.junit.Assert.assertNull;\nimport static org.junit.Assert.assertSame;\nimport static org.junit.Assert.assertThat;\nimport static org.junit.Assert.assertTrue;\n\nimport java.util.Arrays;\n\nimport org.hamcrest.core.CombinableMatcher;\nimport org.junit.Test;\n\npublic class AssertTests {\n\n  @Test\n  public void testNMatrixdtypeEquals() {\n    assertEquals(\"failure - dtypes are not equal\", \"tex\", \"text\");\n  }\n  @Test\n  public void testAssertArrayEquals() {\n    byte[] expected = \"trial\".getBytes();\n    byte[] actual = \"trial\".getBytes();\n    assertArrayEquals(\"failure - byte arrays not same\", expected, actual);\n  }\n\n  @Test\n  public void testAssertEquals() {\n    assertEquals(\"failure - strings are not equal\", \"tex\", \"text\");\n  }\n\n  @Test\n  public void testAssertFalse() {\n    assertFalse(\"failure - should be false\", false);\n  }\n\n  @Test\n  public void testAssertNotNull() {\n    assertNotNull(\"should not be null\", new Object());\n  }\n\n  @Test\n  public void testAssertNotSame() {\n    assertNotSame(\"should not be same Object\", new Object(), new Object());\n  }\n\n  @Test\n  public void testAssertNull() {\n    assertNull(\"should be null\", null);\n  }\n\n  @Test\n  
public void testAssertSame() {\n    Integer aNumber = Integer.valueOf(768);\n    assertSame(\"should be same\", aNumber, aNumber);\n  }\n\n  // JUnit Matchers assertThat\n  @Test\n  public void testAssertThatBothContainsString() {\n    assertThat(\"albumen\", both(containsString(\"a\")).and(containsString(\"b\")));\n  }\n\n  @Test\n  public void testAssertThatHasItems() {\n    assertThat(Arrays.asList(\"one\", \"two\", \"three\"), hasItems(\"one\", \"three\"));\n  }\n\n  @Test\n  public void testAssertThatEveryItemContainsString() {\n    assertThat(Arrays.asList(new String[] { \"fun\", \"ban\", \"net\" }), everyItem(containsString(\"n\")));\n  }\n\n  // Core Hamcrest Matchers with assertThat\n  @Test\n  public void testAssertThatHamcrestCoreMatchers() {\n    assertThat(\"good\", allOf(equalTo(\"good\"), startsWith(\"good\")));\n    assertThat(\"good\", not(allOf(equalTo(\"bad\"), equalTo(\"good\"))));\n    assertThat(\"good\", anyOf(equalTo(\"bad\"), equalTo(\"good\")));\n    assertThat(7, not(CombinableMatcher.<Integer> either(equalTo(3)).or(equalTo(4))));\n    assertThat(new Object(), not(sameInstance(new Object())));\n  }\n\n  @Test\n  public void testAssertTrue() {\n    assertTrue(\"failure - should be true\", true);\n  }\n}"
  },
  {
    "path": "ext/nmatrix_java/test/TestRunner.java",
    "content": "public class TestRunner{\n\tpublic static void main(String[] args) {\n\t\tAssertTests test1 = new AssertTests();\n\t\ttest1.testAssertArrayEquals();\n\t\ttest1.testAssertEquals();\n\t}\n}"
  },
  {
    "path": "ext/nmatrix_lapacke/extconf.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == extconf.rb\n#\n# This file checks for ATLAS and other necessary headers, and\n# generates a Makefile for compiling NMatrix.\n\nrequire \"nmatrix/mkmf\"\n\n#$INSTALLFILES = [['nmatrix.h', '$(archdir)'], ['nmatrix.hpp', '$(archdir)'], ['nmatrix_config.h', '$(archdir)'], ['nm_memory.h', '$(archdir)']]\nif /cygwin|mingw/ =~ RUBY_PLATFORM\n  #$INSTALLFILES << ['libnmatrix.a', '$(archdir)']\nend\n\n$DEBUG = true\n#not the right way to add this include directory\n$CFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix -I$(srcdir)/lapacke/include\",$CFLAGS].join(\" \")\n$CXXFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix -I$(srcdir)/lapacke/include\",$CXXFLAGS].join(\" \")\n$CPPFLAGS = [\"-Wall -Werror=return-type -I$(srcdir)/../nmatrix -I$(srcdir)/lapacke/include\",$CPPFLAGS].join(\" \")\n\n# When adding objects here, make sure their directories are included in CLEANOBJS down at the bottom of extconf.rb.\n# Why not just autogenerate this list from all .c/.cpp files in directory?\nbasenames = %w{nmatrix_lapacke math_lapacke lapacke}\n$objs = basenames.map { |b| \"#{b}.o\"   }\n$srcs = basenames.map { |b| \"#{b}.cpp\" }\n\n# For some reason, if we try to look for /usr/lib64/atlas on a Mac OS X Mavericks system, and the directory does not\n# exist, it will give a linker error -- 
even if the lib dir is already correctly included with -L. So we need to check\n# that Dir.exists?(d) for each.\nldefaults = {lapack: [\"/usr/local/lib\"].delete_if { |d| !Dir.exists?(d) } }\n\n# It is not clear how this variable should be defined, or if it is necessary at all. \n# See issue https://github.com/SciRuby/nmatrix/issues/403\nidefaults = {lapack: [] }\n\nunless have_library(\"lapack\")\n  dir_config(\"lapack\", idefaults[:lapack], ldefaults[:lapack])\nend\n\n# Order matters here: ATLAS has to go after LAPACK: http://mail.scipy.org/pipermail/scipy-user/2007-January/010717.html\n$libs += \" -llapack \"\n#To use the Intel MKL, comment out the line above, and also comment out the bit above with have_library and dir_config for lapack.\n#Then add something like the line below (for exactly what linker flags to use see https://software.intel.com/en-us/articles/intel-mkl-link-line-advisor ):\n#$libs += \" -L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_sequential \"\n\ncreate_conf_h(\"nmatrix_lapacke_config.h\")\ncreate_makefile(\"nmatrix_lapacke\")\n\n# to clean up object files in subdirectories:\nopen('Makefile', 'a') do |f|\n  clean_objs_paths = %w{ }.map { |d| \"#{d}/*.#{CONFIG[\"OBJEXT\"]}\" }\n  f.write(\"CLEANOBJS := $(CLEANOBJS) #{clean_objs_paths.join(' ')}\")\nend\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/include/lapacke.h",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#ifndef _LAPACKE_H_\n#define _LAPACKE_H_\n\n/*\n*  Turn on HAVE_LAPACK_CONFIG_H to redefine C-LAPACK datatypes\n*/\n#ifdef HAVE_LAPACK_CONFIG_H\n#include \"lapacke_config.h\"\n#endif\n\n#include <stdlib.h>\n\n#ifndef lapack_int\n#define lapack_int     int\n#endif\n\n#ifndef lapack_logical\n#define lapack_logical lapack_int\n#endif\n\n/* Complex types are structures equivalent to the\n* Fortran complex types COMPLEX(4) and COMPLEX(8).\n*\n* One can also redefine the types with his own types\n* for example by including in the code definitions like\n*\n* #define lapack_complex_float std::complex<float>\n* #define lapack_complex_double std::complex<double>\n*\n* or define these types in the command line:\n*\n* -Dlapack_complex_float=\"std::complex<float>\"\n* -Dlapack_complex_double=\"std::complex<double>\"\n*/\n\n#ifndef LAPACK_COMPLEX_CUSTOM\n\n/* Complex type (single precision) */\n#ifndef lapack_complex_float\n#include <complex.h>\n#define lapack_complex_float    float _Complex\n#endif\n\n#ifndef lapack_complex_float_real\n#define lapack_complex_float_real(z)       (creal(z))\n#endif\n\n#ifndef lapack_complex_float_imag\n#define lapack_complex_float_imag(z)       
(cimag(z))\n#endif\n\nlapack_complex_float lapack_make_complex_float( float re, float im );\n\n/* Complex type (double precision) */\n#ifndef lapack_complex_double\n#include <complex.h>\n#define lapack_complex_double   double _Complex\n#endif\n\n#ifndef lapack_complex_double_real\n#define lapack_complex_double_real(z)      (creal(z))\n#endif\n\n#ifndef lapack_complex_double_imag\n#define lapack_complex_double_imag(z)       (cimag(z))\n#endif\n\nlapack_complex_double lapack_make_complex_double( double re, double im );\n\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n#ifndef LAPACKE_malloc\n#define LAPACKE_malloc( size ) malloc( size )\n#endif\n#ifndef LAPACKE_free\n#define LAPACKE_free( p )      free( p )\n#endif\n\n#define LAPACK_C2INT( x ) (lapack_int)(*((float*)&x ))\n#define LAPACK_Z2INT( x ) (lapack_int)(*((double*)&x ))\n\n#define LAPACK_ROW_MAJOR               101\n#define LAPACK_COL_MAJOR               102\n\n#define LAPACK_WORK_MEMORY_ERROR       -1010\n#define LAPACK_TRANSPOSE_MEMORY_ERROR  -1011\n\n/* Callback logical functions of one, two, or three arguments are used\n*  to select eigenvalues to sort to the top left of the Schur form.\n*  The value is selected if function returns TRUE (non-zero). 
*/\n\ntypedef lapack_logical (*LAPACK_S_SELECT2) ( const float*, const float* );\ntypedef lapack_logical (*LAPACK_S_SELECT3)\n    ( const float*, const float*, const float* );\ntypedef lapack_logical (*LAPACK_D_SELECT2) ( const double*, const double* );\ntypedef lapack_logical (*LAPACK_D_SELECT3)\n    ( const double*, const double*, const double* );\n\ntypedef lapack_logical (*LAPACK_C_SELECT1) ( const lapack_complex_float* );\ntypedef lapack_logical (*LAPACK_C_SELECT2)\n    ( const lapack_complex_float*, const lapack_complex_float* );\ntypedef lapack_logical (*LAPACK_Z_SELECT1) ( const lapack_complex_double* );\ntypedef lapack_logical (*LAPACK_Z_SELECT2)\n    ( const lapack_complex_double*, const lapack_complex_double* );\n\n#include \"lapacke_mangling.h\"\n\n#define LAPACK_lsame LAPACK_GLOBAL(lsame,LSAME)\nlapack_logical LAPACK_lsame( char* ca,  char* cb,\n                              lapack_int lca, lapack_int lcb );\n\n/* C-LAPACK function prototypes */\n\nlapack_int LAPACKE_sbdsdc( int matrix_order, char uplo, char compq,\n                           lapack_int n, float* d, float* e, float* u,\n                           lapack_int ldu, float* vt, lapack_int ldvt, float* q,\n                           lapack_int* iq );\nlapack_int LAPACKE_dbdsdc( int matrix_order, char uplo, char compq,\n                           lapack_int n, double* d, double* e, double* u,\n                           lapack_int ldu, double* vt, lapack_int ldvt,\n                           double* q, lapack_int* iq );\n\nlapack_int LAPACKE_sbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                           float* d, float* e, float* vt, lapack_int ldvt,\n                           float* u, lapack_int ldu, float* c, lapack_int ldc );\nlapack_int LAPACKE_dbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                         
  double* d, double* e, double* vt, lapack_int ldvt,\n                           double* u, lapack_int ldu, double* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_cbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                           float* d, float* e, lapack_complex_float* vt,\n                           lapack_int ldvt, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_zbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                           double* d, double* e, lapack_complex_double* vt,\n                           lapack_int ldvt, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* c,\n                           lapack_int ldc );\n\nlapack_int LAPACKE_sdisna( char job, lapack_int m, lapack_int n, const float* d,\n                           float* sep );\nlapack_int LAPACKE_ddisna( char job, lapack_int m, lapack_int n,\n                           const double* d, double* sep );\n\nlapack_int LAPACKE_sgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, float* ab, lapack_int ldab, float* d,\n                           float* e, float* q, lapack_int ldq, float* pt,\n                           lapack_int ldpt, float* c, lapack_int ldc );\nlapack_int LAPACKE_dgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, double* ab, lapack_int ldab,\n                           double* d, double* e, double* q, lapack_int ldq,\n                           double* pt, lapack_int ldpt, double* c,\n                           
lapack_int ldc );\nlapack_int LAPACKE_cgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, lapack_complex_float* ab,\n                           lapack_int ldab, float* d, float* e,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* pt, lapack_int ldpt,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, lapack_complex_double* ab,\n                           lapack_int ldab, double* d, double* e,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* pt, lapack_int ldpt,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* ab,\n                           lapack_int ldab, const lapack_int* ipiv, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* ab,\n                           lapack_int ldab, const lapack_int* ipiv,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           
const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* ab,\n                           lapack_int ldab, float* r, float* c, float* rowcnd,\n                           float* colcnd, float* amax );\nlapack_int LAPACKE_dgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* ab,\n                           lapack_int ldab, double* r, double* c,\n                           double* rowcnd, double* colcnd, double* amax );\nlapack_int LAPACKE_cgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           float* r, float* c, float* rowcnd, float* colcnd,\n                           float* amax );\nlapack_int LAPACKE_zgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           double* r, double* c, double* rowcnd, double* colcnd,\n                           double* amax );\n\nlapack_int LAPACKE_sgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku, const float* ab,\n                            lapack_int ldab, float* r, float* c, float* rowcnd,\n                            float* colcnd, float* amax );\nlapack_int LAPACKE_dgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku, const double* ab,\n                            lapack_int ldab, double* r, double* c,\n                            double* rowcnd, double* colcnd, double* amax 
);\nlapack_int LAPACKE_cgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku,\n                            const lapack_complex_float* ab, lapack_int ldab,\n                            float* r, float* c, float* rowcnd, float* colcnd,\n                            float* amax );\nlapack_int LAPACKE_zgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku,\n                            const lapack_complex_double* ab, lapack_int ldab,\n                            double* r, double* c, double* rowcnd,\n                            double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const float* ab, lapack_int ldab, const float* afb,\n                           lapack_int ldafb, const lapack_int* ipiv,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const double* ab, lapack_int ldab, const double* afb,\n                           lapack_int ldafb, const lapack_int* ipiv,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* afb, lapack_int ldafb,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n    
                       lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* afb, lapack_int ldafb,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const float* ab, lapack_int ldab,\n                            const float* afb, lapack_int ldafb,\n                            const lapack_int* ipiv, const float* r,\n                            const float* c, const float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const double* ab, lapack_int ldab,\n                            const double* afb, lapack_int ldafb,\n                            const lapack_int* ipiv, const double* r,\n                            const double* c, const double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* berr, lapack_int n_err_bnds,\n                            
double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const lapack_complex_float* ab,\n                            lapack_int ldab, const lapack_complex_float* afb,\n                            lapack_int ldafb, const lapack_int* ipiv,\n                            const float* r, const float* c,\n                            const lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const lapack_complex_double* ab,\n                            lapack_int ldab, const lapack_complex_double* afb,\n                            lapack_int ldafb, const lapack_int* ipiv,\n                            const double* r, const double* c,\n                            const lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_sgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs, float* ab,\n                          lapack_int ldab, lapack_int* ipiv, float* b,\n                          lapack_int ldb );\nlapack_int 
LAPACKE_dgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs, double* ab,\n                          lapack_int ldab, lapack_int* ipiv, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs,\n                          lapack_complex_float* ab, lapack_int ldab,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs,\n                          lapack_complex_double* ab, lapack_int ldab,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_sgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, float* ab, lapack_int ldab,\n                           float* afb, lapack_int ldafb, lapack_int* ipiv,\n                           char* equed, float* r, float* c, float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr,\n                           float* rpivot );\nlapack_int LAPACKE_dgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, double* ab, lapack_int ldab,\n                           double* afb, lapack_int ldafb, lapack_int* ipiv,\n                           char* equed, double* r, double* c, double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr,\n                           double* rpivot 
);\nlapack_int LAPACKE_cgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, lapack_complex_float* ab,\n                           lapack_int ldab, lapack_complex_float* afb,\n                           lapack_int ldafb, lapack_int* ipiv, char* equed,\n                           float* r, float* c, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr, float* rpivot );\nlapack_int LAPACKE_zgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, lapack_complex_double* ab,\n                           lapack_int ldab, lapack_complex_double* afb,\n                           lapack_int ldafb, lapack_int* ipiv, char* equed,\n                           double* r, double* c, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, double* rcond, double* ferr,\n                           double* berr, double* rpivot );\n\nlapack_int LAPACKE_sgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, float* ab, lapack_int ldab,\n                            float* afb, lapack_int ldafb, lapack_int* ipiv,\n                            char* equed, float* r, float* c, float* b,\n                            lapack_int ldb, float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int 
LAPACKE_dgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, double* ab, lapack_int ldab,\n                            double* afb, lapack_int ldafb, lapack_int* ipiv,\n                            char* equed, double* r, double* c, double* b,\n                            lapack_int ldb, double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\nlapack_int LAPACKE_cgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, lapack_complex_float* ab,\n                            lapack_int ldab, lapack_complex_float* afb,\n                            lapack_int ldafb, lapack_int* ipiv, char* equed,\n                            float* r, float* c, lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* rpvgrw,\n                            float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, lapack_complex_double* ab,\n                            lapack_int ldab, lapack_complex_double* afb,\n                            lapack_int ldafb, lapack_int* ipiv, char* equed,\n                            double* r, double* c, lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* 
x,\n                            lapack_int ldx, double* rcond, double* rpvgrw,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_sgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, float* ab,\n                           lapack_int ldab, lapack_int* ipiv );\nlapack_int LAPACKE_dgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, double* ab,\n                           lapack_int ldab, lapack_int* ipiv );\nlapack_int LAPACKE_cgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_sgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const float* ab, lapack_int ldab,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const double* ab, lapack_int ldab,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_float* ab, 
lapack_int ldab,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_sgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* scale,\n                           lapack_int m, float* v, lapack_int ldv );\nlapack_int LAPACKE_dgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* scale,\n                           lapack_int m, double* v, lapack_int ldv );\nlapack_int LAPACKE_cgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* scale,\n                           lapack_int m, lapack_complex_float* v,\n                           lapack_int ldv );\nlapack_int LAPACKE_zgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* scale,\n                           lapack_int m, lapack_complex_double* v,\n                           lapack_int ldv );\n\nlapack_int LAPACKE_sgebal( int matrix_order, char job, lapack_int n, float* a,\n                           lapack_int lda, lapack_int* ilo, lapack_int* ihi,\n                           float* scale );\nlapack_int LAPACKE_dgebal( int matrix_order, char job, lapack_int n, double* a,\n                           lapack_int lda, lapack_int* ilo, lapack_int* ihi,\n                           double* scale );\nlapack_int LAPACKE_cgebal( int matrix_order, char job, lapack_int n,\n                
           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ilo, lapack_int* ihi, float* scale );\nlapack_int LAPACKE_zgebal( int matrix_order, char job, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ilo, lapack_int* ihi, double* scale );\n\nlapack_int LAPACKE_sgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* d, float* e,\n                           float* tauq, float* taup );\nlapack_int LAPACKE_dgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* d, double* e,\n                           double* tauq, double* taup );\nlapack_int LAPACKE_cgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda, float* d,\n                           float* e, lapack_complex_float* tauq,\n                           lapack_complex_float* taup );\nlapack_int LAPACKE_zgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda, double* d,\n                           double* e, lapack_complex_double* tauq,\n                           lapack_complex_double* taup );\n\nlapack_int LAPACKE_sgecon( int matrix_order, char norm, lapack_int n,\n                           const float* a, lapack_int lda, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dgecon( int matrix_order, char norm, lapack_int n,\n                           const double* a, lapack_int lda, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_cgecon( int matrix_order, char norm, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_zgecon( int matrix_order, char norm, lapack_int n,\n                     
      const lapack_complex_double* a, lapack_int lda,\n                           double anorm, double* rcond );\n\nlapack_int LAPACKE_sgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const float* a, lapack_int lda, float* r, float* c,\n                           float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_dgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const double* a, lapack_int lda, double* r,\n                           double* c, double* rowcnd, double* colcnd,\n                           double* amax );\nlapack_int LAPACKE_cgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float* r, float* c, float* rowcnd, float* colcnd,\n                           float* amax );\nlapack_int LAPACKE_zgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           double* r, double* c, double* rowcnd, double* colcnd,\n                           double* amax );\n\nlapack_int LAPACKE_sgeequb( int matrix_order, lapack_int m, lapack_int n,\n                            const float* a, lapack_int lda, float* r, float* c,\n                            float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_dgeequb( int matrix_order, lapack_int m, lapack_int n,\n                            const double* a, lapack_int lda, double* r,\n                            double* c, double* rowcnd, double* colcnd,\n                            double* amax );\nlapack_int LAPACKE_cgeequb( int matrix_order, lapack_int m, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* r, float* c, float* rowcnd, float* colcnd,\n                            float* amax );\nlapack_int LAPACKE_zgeequb( int matrix_order, lapack_int m, lapack_int n,\n    
                        const lapack_complex_double* a, lapack_int lda,\n                            double* r, double* c, double* rowcnd,\n                            double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_S_SELECT2 select, lapack_int n, float* a,\n                          lapack_int lda, lapack_int* sdim, float* wr,\n                          float* wi, float* vs, lapack_int ldvs );\nlapack_int LAPACKE_dgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_D_SELECT2 select, lapack_int n, double* a,\n                          lapack_int lda, lapack_int* sdim, double* wr,\n                          double* wi, double* vs, lapack_int ldvs );\nlapack_int LAPACKE_cgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_C_SELECT1 select, lapack_int n,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_int* sdim, lapack_complex_float* w,\n                          lapack_complex_float* vs, lapack_int ldvs );\nlapack_int LAPACKE_zgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_Z_SELECT1 select, lapack_int n,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_int* sdim, lapack_complex_double* w,\n                          lapack_complex_double* vs, lapack_int ldvs );\n\nlapack_int LAPACKE_sgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_S_SELECT2 select, char sense, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* sdim,\n                           float* wr, float* wi, float* vs, lapack_int ldvs,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_dgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_D_SELECT2 select, char sense, lapack_int n,\n                          
 double* a, lapack_int lda, lapack_int* sdim,\n                           double* wr, double* wi, double* vs, lapack_int ldvs,\n                           double* rconde, double* rcondv );\nlapack_int LAPACKE_cgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_C_SELECT1 select, char sense, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* sdim, lapack_complex_float* w,\n                           lapack_complex_float* vs, lapack_int ldvs,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_zgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_Z_SELECT1 select, char sense, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* sdim, lapack_complex_double* w,\n                           lapack_complex_double* vs, lapack_int ldvs,\n                           double* rconde, double* rcondv );\n\nlapack_int LAPACKE_sgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, float* a, lapack_int lda, float* wr,\n                          float* wi, float* vl, lapack_int ldvl, float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_dgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, double* a, lapack_int lda, double* wr,\n                          double* wi, double* vl, lapack_int ldvl, double* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_cgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* w, lapack_complex_float* vl,\n                          lapack_int ldvl, lapack_complex_float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_zgeev( int matrix_order, char jobvl, char jobvr,\n      
                    lapack_int n, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* w,\n                          lapack_complex_double* vl, lapack_int ldvl,\n                          lapack_complex_double* vr, lapack_int ldvr );\n\nlapack_int LAPACKE_sgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, float* a,\n                           lapack_int lda, float* wr, float* wi, float* vl,\n                           lapack_int ldvl, float* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, float* scale,\n                           float* abnrm, float* rconde, float* rcondv );\nlapack_int LAPACKE_dgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, double* a,\n                           lapack_int lda, double* wr, double* wi, double* vl,\n                           lapack_int ldvl, double* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, double* scale,\n                           double* abnrm, double* rconde, double* rcondv );\nlapack_int LAPACKE_cgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* w, lapack_complex_float* vl,\n                           lapack_int ldvl, lapack_complex_float* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           float* scale, float* abnrm, float* rconde,\n                           float* rcondv );\nlapack_int LAPACKE_zgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* w, 
lapack_complex_double* vl,\n                           lapack_int ldvl, lapack_complex_double* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           double* scale, double* abnrm, double* rconde,\n                           double* rcondv );\n\nlapack_int LAPACKE_sgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, float* a, lapack_int lda,\n                           float* tau );\nlapack_int LAPACKE_dgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, double* a, lapack_int lda,\n                           double* tau );\nlapack_int LAPACKE_cgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* tau );\nlapack_int LAPACKE_zgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgejsv( int matrix_order, char joba, char jobu, char jobv,\n                           char jobr, char jobt, char jobp, lapack_int m,\n                           lapack_int n, float* a, lapack_int lda, float* sva,\n                           float* u, lapack_int ldu, float* v, lapack_int ldv,\n                           float* stat, lapack_int* istat );\nlapack_int LAPACKE_dgejsv( int matrix_order, char joba, char jobu, char jobv,\n                           char jobr, char jobt, char jobp, lapack_int m,\n                           lapack_int n, double* a, lapack_int lda, double* sva,\n                           double* u, lapack_int ldu, double* v, lapack_int ldv,\n                           double* stat, lapack_int* istat );\n\nlapack_int LAPACKE_sgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int 
lda, float* tau );\nlapack_int LAPACKE_dgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgels( int matrix_order, char trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs, float* a,\n                          lapack_int lda, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgels( int matrix_order, char trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs, double* a,\n                          lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgels( int matrix_order, char trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgels( int matrix_order, char 
trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_dgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_cgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_zgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\n\nlapack_int LAPACKE_sgelss( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_dgelss( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_cgelss( 
int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_zgelss( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\n\nlapack_int LAPACKE_sgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, lapack_int* jpvt, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_dgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, lapack_int* jpvt,\n                           double rcond, lapack_int* rank );\nlapack_int LAPACKE_cgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_int* jpvt, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_zgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_int* jpvt, double rcond,\n                           lapack_int* rank );\n\nlapack_int LAPACKE_sgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, 
float* tau );\nlapack_int LAPACKE_dgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* jpvt,\n                           float* tau );\nlapack_int LAPACKE_dgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* jpvt,\n                           double* tau );\nlapack_int LAPACKE_cgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* jpvt,\n                           float* tau );\nlapack_int LAPACKE_dgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* jpvt,\n                           double* tau );\nlapack_int LAPACKE_cgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_float* tau );\nlapack_int 
LAPACKE_zgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            
lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const float* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const double* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n             
              lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs, const float* a,\n                            lapack_int lda, const float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* r,\n                            const float* c, const float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs, const double* a,\n                            lapack_int lda, const double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* r,\n                            const double* c, const double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* r,\n                            const float* c, const lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* berr,\n          
                  lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* r,\n                            const double* c, const lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* x,\n                            lapack_int ldx, double* rcond, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_sgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, float* a, lapack_int lda, float* s,\n                           float* u, lapack_int ldu, float* vt,\n                           lapack_int ldvt );\nlapack_int LAPACKE_dgesdd( int matrix_order, char jobz, lapack_int m,\n                     
      lapack_int n, double* a, lapack_int lda, double* s,\n                           double* u, lapack_int ldu, double* vt,\n                           lapack_int ldvt );\nlapack_int LAPACKE_cgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float* s, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* vt,\n                           lapack_int ldvt );\nlapack_int LAPACKE_zgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double* s, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* vt,\n                           lapack_int ldvt );\n\nlapack_int LAPACKE_sgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* a, lapack_int lda, lapack_int* ipiv, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* a, lapack_int lda, lapack_int* ipiv,\n                          double* b, lapack_int ldb );\nlapack_int LAPACKE_cgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dsgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                           double* a, lapack_int lda, lapack_int* ipiv,\n                           double* b, lapack_int ldb, double* x, lapack_int 
ldx,\n                           lapack_int* iter );\nlapack_int LAPACKE_zcgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, lapack_int* iter );\n\nlapack_int LAPACKE_sgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, float* a, lapack_int lda,\n                           float* s, float* u, lapack_int ldu, float* vt,\n                           lapack_int ldvt, float* superb );\nlapack_int LAPACKE_dgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, double* a,\n                           lapack_int lda, double* s, double* u, lapack_int ldu,\n                           double* vt, lapack_int ldvt, double* superb );\nlapack_int LAPACKE_cgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float* s, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* vt,\n                           lapack_int ldvt, float* superb );\nlapack_int LAPACKE_zgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double* s, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* vt,\n                           lapack_int ldvt, double* superb );\n\nlapack_int LAPACKE_sgesvj( int matrix_order, char joba, char jobu, char jobv,\n                           lapack_int m, lapack_int n, float* a, lapack_int lda,\n                           float* sva, lapack_int mv, float* v, lapack_int ldv,\n                           float* stat 
);\nlapack_int LAPACKE_dgesvj( int matrix_order, char joba, char jobu, char jobv,\n                           lapack_int m, lapack_int n, double* a,\n                           lapack_int lda, double* sva, lapack_int mv,\n                           double* v, lapack_int ldv, double* stat );\n\nlapack_int LAPACKE_sgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, float* a,\n                           lapack_int lda, float* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, float* r, float* c,\n                           float* b, lapack_int ldb, float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr,\n                           float* rpivot );\nlapack_int LAPACKE_dgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, double* a,\n                           lapack_int lda, double* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, double* r, double* c,\n                           double* b, lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr,\n                           double* rpivot );\nlapack_int LAPACKE_cgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, float* r, float* c,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr,\n                           float* rpivot );\nlapack_int LAPACKE_zgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n     
                      lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, double* r, double* c,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr,\n                           double* rpivot );\n\nlapack_int LAPACKE_sgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs, float* a,\n                            lapack_int lda, float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* r, float* c,\n                            float* b, lapack_int ldb, float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs, double* a,\n                            lapack_int lda, double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* r, double* c,\n                            double* b, lapack_int ldb, double* x,\n                            lapack_int ldx, double* rcond, double* rpvgrw,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            
lapack_complex_float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* r, float* c,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* r, double* c,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_sgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv );\n\nlapack_int 
LAPACKE_sgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetri( int matrix_order, lapack_int n, float* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dgetri( int matrix_order, lapack_int n, double* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_cgetri( int matrix_order, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_zgetri( int matrix_order, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n   
                        lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* lscale,\n                           const float* rscale, lapack_int m, float* v,\n                           lapack_int ldv );\nlapack_int LAPACKE_dggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* lscale,\n                           const double* rscale, lapack_int m, double* v,\n                           lapack_int ldv );\nlapack_int LAPACKE_cggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* lscale,\n                           const float* rscale, lapack_int m,\n                           lapack_complex_float* v, lapack_int ldv );\nlapack_int LAPACKE_zggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* lscale,\n                           const double* rscale, lapack_int m,\n                           lapack_complex_double* v, lapack_int ldv );\n\nlapack_int LAPACKE_sggbal( int matrix_order, char job, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           lapack_int* ilo, lapack_int* ihi, float* lscale,\n                           float* rscale );\nlapack_int LAPACKE_dggbal( int matrix_order, char job, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           lapack_int* ilo, 
lapack_int* ihi, double* lscale,\n                           double* rscale );\nlapack_int LAPACKE_cggbal( int matrix_order, char job, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_int* ilo, lapack_int* ihi, float* lscale,\n                           float* rscale );\nlapack_int LAPACKE_zggbal( int matrix_order, char job, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_int* ilo, lapack_int* ihi, double* lscale,\n                           double* rscale );\n\nlapack_int LAPACKE_sgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_S_SELECT3 selctg, lapack_int n, float* a,\n                          lapack_int lda, float* b, lapack_int ldb,\n                          lapack_int* sdim, float* alphar, float* alphai,\n                          float* beta, float* vsl, lapack_int ldvsl, float* vsr,\n                          lapack_int ldvsr );\nlapack_int LAPACKE_dgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_D_SELECT3 selctg, lapack_int n, double* a,\n                          lapack_int lda, double* b, lapack_int ldb,\n                          lapack_int* sdim, double* alphar, double* alphai,\n                          double* beta, double* vsl, lapack_int ldvsl,\n                          double* vsr, lapack_int ldvsr );\nlapack_int LAPACKE_cgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_C_SELECT2 selctg, lapack_int n,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* b, lapack_int ldb,\n                          lapack_int* sdim, lapack_complex_float* alpha,\n                          
lapack_complex_float* beta, lapack_complex_float* vsl,\n                          lapack_int ldvsl, lapack_complex_float* vsr,\n                          lapack_int ldvsr );\nlapack_int LAPACKE_zgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_Z_SELECT2 selctg, lapack_int n,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_complex_double* b, lapack_int ldb,\n                          lapack_int* sdim, lapack_complex_double* alpha,\n                          lapack_complex_double* beta,\n                          lapack_complex_double* vsl, lapack_int ldvsl,\n                          lapack_complex_double* vsr, lapack_int ldvsr );\n\nlapack_int LAPACKE_sggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_S_SELECT3 selctg, char sense,\n                           lapack_int n, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, lapack_int* sdim, float* alphar,\n                           float* alphai, float* beta, float* vsl,\n                           lapack_int ldvsl, float* vsr, lapack_int ldvsr,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_dggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_D_SELECT3 selctg, char sense,\n                           lapack_int n, double* a, lapack_int lda, double* b,\n                           lapack_int ldb, lapack_int* sdim, double* alphar,\n                           double* alphai, double* beta, double* vsl,\n                           lapack_int ldvsl, double* vsr, lapack_int ldvsr,\n                           double* rconde, double* rcondv );\nlapack_int LAPACKE_cggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_C_SELECT2 selctg, char sense,\n                           lapack_int n, lapack_complex_float* a,\n                    
       lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_int* sdim,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta,\n                           lapack_complex_float* vsl, lapack_int ldvsl,\n                           lapack_complex_float* vsr, lapack_int ldvsr,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_zggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_Z_SELECT2 selctg, char sense,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_int* sdim,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* vsl, lapack_int ldvsl,\n                           lapack_complex_double* vsr, lapack_int ldvsr,\n                           double* rconde, double* rcondv );\n\nlapack_int LAPACKE_sggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, float* a, lapack_int lda, float* b,\n                          lapack_int ldb, float* alphar, float* alphai,\n                          float* beta, float* vl, lapack_int ldvl, float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_dggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, double* a, lapack_int lda, double* b,\n                          lapack_int ldb, double* alphar, double* alphai,\n                          double* beta, double* vl, lapack_int ldvl, double* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_cggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* 
b, lapack_int ldb,\n                          lapack_complex_float* alpha,\n                          lapack_complex_float* beta, lapack_complex_float* vl,\n                          lapack_int ldvl, lapack_complex_float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_zggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* b,\n                          lapack_int ldb, lapack_complex_double* alpha,\n                          lapack_complex_double* beta,\n                          lapack_complex_double* vl, lapack_int ldvl,\n                          lapack_complex_double* vr, lapack_int ldvr );\n\nlapack_int LAPACKE_sggevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           float* alphar, float* alphai, float* beta, float* vl,\n                           lapack_int ldvl, float* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, float* lscale,\n                           float* rscale, float* abnrm, float* bbnrm,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_dggevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double* alphar, double* alphai, double* beta,\n                           double* vl, lapack_int ldvl, double* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           double* lscale, double* rscale, double* abnrm,\n                           double* bbnrm, double* rconde, double* rcondv );\nlapack_int LAPACKE_cggevx( int matrix_order, char balanc, char jobvl,\n   
                        char jobvr, char sense, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta, lapack_complex_float* vl,\n                           lapack_int ldvl, lapack_complex_float* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           float* lscale, float* rscale, float* abnrm,\n                           float* bbnrm, float* rconde, float* rcondv );\nlapack_int LAPACKE_zggevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* vl, lapack_int ldvl,\n                           lapack_complex_double* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, double* lscale,\n                           double* rscale, double* abnrm, double* bbnrm,\n                           double* rconde, double* rcondv );\n\nlapack_int LAPACKE_sggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* d, float* x, float* y );\nlapack_int LAPACKE_dggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, double* a, lapack_int lda, double* b,\n                           lapack_int ldb, double* d, double* x, double* y );\nlapack_int LAPACKE_cggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_float* a,\n          
                 lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* d,\n                           lapack_complex_float* x, lapack_complex_float* y );\nlapack_int LAPACKE_zggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* d,\n                           lapack_complex_double* x, lapack_complex_double* y );\n\nlapack_int LAPACKE_sgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           float* a, lapack_int lda, float* b, lapack_int ldb,\n                           float* q, lapack_int ldq, float* z, lapack_int ldz );\nlapack_int LAPACKE_dgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           double* a, lapack_int lda, double* b, lapack_int ldb,\n                           double* q, lapack_int ldq, double* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_cgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* q, lapack_int ldq,\n      
                     lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* c, float* d, float* x );\nlapack_int LAPACKE_dgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, double* a, lapack_int lda, double* b,\n                           lapack_int ldb, double* c, double* d, double* x );\nlapack_int LAPACKE_cgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* c,\n                           lapack_complex_float* d, lapack_complex_float* x );\nlapack_int LAPACKE_zgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* c,\n                           lapack_complex_double* d, lapack_complex_double* x );\n\nlapack_int LAPACKE_sggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, float* a, lapack_int lda, float* taua,\n                           float* b, lapack_int ldb, float* taub );\nlapack_int LAPACKE_dggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, double* a, lapack_int lda,\n                           double* taua, double* b, lapack_int ldb,\n                           double* taub );\nlapack_int LAPACKE_cggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* taua,\n                           lapack_complex_float* b, 
lapack_int ldb,\n                           lapack_complex_float* taub );\nlapack_int LAPACKE_zggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* taua,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* taub );\n\nlapack_int LAPACKE_sggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, float* a, lapack_int lda, float* taua,\n                           float* b, lapack_int ldb, float* taub );\nlapack_int LAPACKE_dggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, double* a, lapack_int lda,\n                           double* taua, double* b, lapack_int ldb,\n                           double* taub );\nlapack_int LAPACKE_cggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* taua,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* taub );\nlapack_int LAPACKE_zggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* taua,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* taub );\n\nlapack_int LAPACKE_sggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           float* alpha, float* beta, float* u, lapack_int ldu,\n                           float* v, 
lapack_int ldv, float* q, lapack_int ldq,\n                           lapack_int* iwork );\nlapack_int LAPACKE_dggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double* alpha, double* beta, double* u,\n                           lapack_int ldu, double* v, lapack_int ldv, double* q,\n                           lapack_int ldq, lapack_int* iwork );\nlapack_int LAPACKE_cggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           float* alpha, float* beta, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* v,\n                           lapack_int ldv, lapack_complex_float* q,\n                           lapack_int ldq, lapack_int* iwork );\nlapack_int LAPACKE_zggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           double* alpha, double* beta,\n                           lapack_complex_double* u, lapack_int ldu,\n                           lapack_complex_double* v, lapack_int ldv,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_int* iwork );\n\nlapack_int LAPACKE_sggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           
lapack_int m, lapack_int p, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb, float tola,\n                           float tolb, lapack_int* k, lapack_int* l, float* u,\n                           lapack_int ldu, float* v, lapack_int ldv, float* q,\n                           lapack_int ldq );\nlapack_int LAPACKE_dggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double tola, double tolb, lapack_int* k,\n                           lapack_int* l, double* u, lapack_int ldu, double* v,\n                           lapack_int ldv, double* q, lapack_int ldq );\nlapack_int LAPACKE_cggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb, float tola,\n                           float tolb, lapack_int* k, lapack_int* l,\n                           lapack_complex_float* u, lapack_int ldu,\n                           lapack_complex_float* v, lapack_int ldv,\n                           lapack_complex_float* q, lapack_int ldq );\nlapack_int LAPACKE_zggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           double tola, double tolb, lapack_int* k,\n                           lapack_int* l, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* v,\n                           lapack_int ldv, lapack_complex_double* q,\n                           lapack_int ldq );\n\nlapack_int 
LAPACKE_sgtcon( char norm, lapack_int n, const float* dl,\n                           const float* d, const float* du, const float* du2,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_dgtcon( char norm, lapack_int n, const double* dl,\n                           const double* d, const double* du, const double* du2,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_cgtcon( char norm, lapack_int n,\n                           const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           const lapack_complex_float* du,\n                           const lapack_complex_float* du2,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zgtcon( char norm, lapack_int n,\n                           const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           const lapack_complex_double* du2,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* dl, const float* d,\n                           const float* du, const float* dlf, const float* df,\n                           const float* duf, const float* du2,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* dl, const double* d,\n                           const double* du, const double* dlf,\n                           
const double* df, const double* duf,\n                           const double* du2, const lapack_int* ipiv,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           const lapack_complex_float* du,\n                           const lapack_complex_float* dlf,\n                           const lapack_complex_float* df,\n                           const lapack_complex_float* duf,\n                           const lapack_complex_float* du2,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           const lapack_complex_double* dlf,\n                           const lapack_complex_double* df,\n                           const lapack_complex_double* duf,\n                           const lapack_complex_double* du2,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* dl, float* d, float* du, float* b,\n                          lapack_int ldb );\nlapack_int 
LAPACKE_dgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* dl, double* d, double* du, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_float* dl, lapack_complex_float* d,\n                          lapack_complex_float* du, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_double* dl, lapack_complex_double* d,\n                          lapack_complex_double* du, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_sgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, const float* dl,\n                           const float* d, const float* du, float* dlf,\n                           float* df, float* duf, float* du2, lapack_int* ipiv,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, const double* dl,\n                           const double* d, const double* du, double* dlf,\n                           double* df, double* duf, double* du2,\n                           lapack_int* ipiv, const double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           
const lapack_complex_float* du,\n                           lapack_complex_float* dlf, lapack_complex_float* df,\n                           lapack_complex_float* duf, lapack_complex_float* du2,\n                           lapack_int* ipiv, const lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           lapack_complex_double* dlf,\n                           lapack_complex_double* df,\n                           lapack_complex_double* duf,\n                           lapack_complex_double* du2, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_sgttrf( lapack_int n, float* dl, float* d, float* du,\n                           float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_dgttrf( lapack_int n, double* dl, double* d, double* du,\n                           double* du2, lapack_int* ipiv );\nlapack_int LAPACKE_cgttrf( lapack_int n, lapack_complex_float* dl,\n                           lapack_complex_float* d, lapack_complex_float* du,\n                           lapack_complex_float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_zgttrf( lapack_int n, lapack_complex_double* dl,\n                           lapack_complex_double* d, lapack_complex_double* du,\n                           lapack_complex_double* du2, lapack_int* ipiv );\n\nlapack_int LAPACKE_sgttrs( int matrix_order, char 
trans, lapack_int n,\n                           lapack_int nrhs, const float* dl, const float* d,\n                           const float* du, const float* du2,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgttrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* dl, const double* d,\n                           const double* du, const double* du2,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgttrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           const lapack_complex_float* du,\n                           const lapack_complex_float* du2,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zgttrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           const lapack_complex_double* du2,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_chbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, lapack_complex_float* ab,\n                          lapack_int ldab, float* w, lapack_complex_float* z,\n                          lapack_int ldz );\nlapack_int LAPACKE_zhbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, lapack_complex_double* ab,\n                          lapack_int ldab, double* w, lapack_complex_double* z,\n                          lapack_int ldz 
);\n\nlapack_int LAPACKE_chbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_float* ab,\n                           lapack_int ldab, float* w, lapack_complex_float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_zhbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_double* ab,\n                           lapack_int ldab, double* w, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_chbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* q, lapack_int ldq, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* bb, lapack_int ldbb,\n                           lapack_complex_float* x, lapack_int ldx );\nlapack_int 
LAPACKE_zhbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* bb, lapack_int ldbb,\n                           lapack_complex_double* x, lapack_int ldx );\n\nlapack_int LAPACKE_chbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb,\n                          lapack_complex_float* ab, lapack_int ldab,\n                          lapack_complex_float* bb, lapack_int ldbb, float* w,\n                          lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb,\n                          lapack_complex_double* ab, lapack_int ldab,\n                          lapack_complex_double* bb, lapack_int ldbb, double* w,\n                          lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* bb, lapack_int ldbb, float* w,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* bb, lapack_int ldbb,\n                           double* w, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_chbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n             
              lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* bb, lapack_int ldbb,\n                           lapack_complex_float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* bb, lapack_int ldbb,\n                           lapack_complex_double* q, lapack_int ldq, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_float* ab,\n                           lapack_int ldab, float* d, float* e,\n                           lapack_complex_float* q, lapack_int ldq );\nlapack_int LAPACKE_zhbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_double* ab,\n                           lapack_int ldab, double* d, double* e,\n                           lapack_complex_double* q, lapack_int ldq );\n\nlapack_int LAPACKE_checon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zhecon( int matrix_order, char uplo, lapack_int n,\n                           const 
lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_cheequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zheequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_double* a, lapack_int lda,\n                            double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_cheev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_zheev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_double* a, lapack_int lda, double* w );\n\nlapack_int LAPACKE_cheevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_zheevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           double* w );\n\nlapack_int LAPACKE_cheevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float vl, float vu, lapack_int il,\n                           lapack_int iu, float abstol, lapack_int* m, float* w,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_zheevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double vl, double vu, lapack_int il,\n                           lapack_int iu, double abstol, 
lapack_int* m,\n                           double* w, lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* isuppz );\n\nlapack_int LAPACKE_cheevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float vl, float vu, lapack_int il,\n                           lapack_int iu, float abstol, lapack_int* m, float* w,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_zheevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double vl, double vu, lapack_int il,\n                           lapack_int iu, double abstol, lapack_int* m,\n                           double* w, lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chegst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zhegst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_chegv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_float* a,\n                          lapack_int lda, lapack_complex_float* b,\n                          lapack_int ldb, float* w );\nlapack_int LAPACKE_zhegv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_double* a,\n                          
lapack_int lda, lapack_complex_double* b,\n                          lapack_int ldb, double* w );\n\nlapack_int LAPACKE_chegvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float* w );\nlapack_int LAPACKE_zhegvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double* w );\n\nlapack_int LAPACKE_chegvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhegvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_cherfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const 
lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zherfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_cherfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* s,\n                            const lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zherfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* s,\n                            const lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* berr, 
lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_chesv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zhesv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chesvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zhesvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_chesvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* af, lapack_int 
ldaf,\n                            lapack_int* ipiv, char* equed, float* s,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zhesvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* s,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_chetrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda, float* d,\n                           float* e, lapack_complex_float* tau );\nlapack_int LAPACKE_zhetrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda, double* d,\n                           double* e, lapack_complex_double* tau );\n\nlapack_int LAPACKE_chetrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zhetrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int 
lda,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_chetri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_zhetri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv );\n\nlapack_int LAPACKE_chetrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zhetrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, float alpha,\n                          const lapack_complex_float* a, lapack_int lda,\n                          float beta, lapack_complex_float* c );\nlapack_int LAPACKE_zhfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, double alpha,\n                          const lapack_complex_double* a, lapack_int lda,\n                          double beta, lapack_complex_double* c );\n\nlapack_int LAPACKE_shgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           float* h, lapack_int ldh, float* t, lapack_int ldt,\n                           float* alphar, float* alphai, float* beta, float* q,\n                           lapack_int ldq, float* z, lapack_int ldz 
);\nlapack_int LAPACKE_dhgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           double* h, lapack_int ldh, double* t, lapack_int ldt,\n                           double* alphar, double* alphai, double* beta,\n                           double* q, lapack_int ldq, double* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_chgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_float* h, lapack_int ldh,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta, lapack_complex_float* q,\n                           lapack_int ldq, lapack_complex_float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_zhgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_double* h, lapack_int ldh,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zhpcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int 
LAPACKE_chpev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_float* ap, float* w,\n                          lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_double* ap, double* w,\n                          lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_float* ap, float* w,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_double* ap, double* w,\n                           lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_float* ap, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhpevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_double* ap, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chpgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, lapack_complex_float* ap,\n                           const lapack_complex_float* bp );\nlapack_int LAPACKE_zhpgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, 
lapack_complex_double* ap,\n                           const lapack_complex_double* bp );\n\nlapack_int LAPACKE_chpgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_float* ap,\n                          lapack_complex_float* bp, float* w,\n                          lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_double* ap,\n                          lapack_complex_double* bp, double* w,\n                          lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_float* ap,\n                           lapack_complex_float* bp, float* w,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_double* ap,\n                           lapack_complex_double* bp, double* w,\n                           lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_float* ap, lapack_complex_float* bp,\n                           float vl, float vu, lapack_int il, lapack_int iu,\n                           float abstol, lapack_int* m, float* w,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_zhpgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_double* ap, lapack_complex_double* bp,\n                           double vl, 
double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_complex_float* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zhprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_complex_double* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_chpsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* ap,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zhpsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* ap,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_chpsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           lapack_complex_float* afp, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                 
          lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zhpsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           lapack_complex_double* afp, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_chptrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, float* d, float* e,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zhptrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, double* d, double* e,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_chptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zhptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_chptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, const lapack_int* ipiv );\nlapack_int LAPACKE_zhptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, const lapack_int* ipiv );\n\nlapack_int LAPACKE_chptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zhptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* 
ap,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_shsein( int matrix_order, char job, char eigsrc, char initv,\n                           lapack_logical* select, lapack_int n, const float* h,\n                           lapack_int ldh, float* wr, const float* wi,\n                           float* vl, lapack_int ldvl, float* vr,\n                           lapack_int ldvr, lapack_int mm, lapack_int* m,\n                           lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_dhsein( int matrix_order, char job, char eigsrc, char initv,\n                           lapack_logical* select, lapack_int n,\n                           const double* h, lapack_int ldh, double* wr,\n                           const double* wi, double* vl, lapack_int ldvl,\n                           double* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m, lapack_int* ifaill,\n                           lapack_int* ifailr );\nlapack_int LAPACKE_chsein( int matrix_order, char job, char eigsrc, char initv,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* h, lapack_int ldh,\n                           lapack_complex_float* w, lapack_complex_float* vl,\n                           lapack_int ldvl, lapack_complex_float* vr,\n                           lapack_int ldvr, lapack_int mm, lapack_int* m,\n                           lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_zhsein( int matrix_order, char job, char eigsrc, char initv,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* h, lapack_int ldh,\n                           lapack_complex_double* w, lapack_complex_double* vl,\n                           lapack_int ldvl, lapack_complex_double* vr,\n                           
lapack_int ldvr, lapack_int mm, lapack_int* m,\n                           lapack_int* ifaill, lapack_int* ifailr );\n\nlapack_int LAPACKE_shseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, float* h,\n                           lapack_int ldh, float* wr, float* wi, float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_dhseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, double* h,\n                           lapack_int ldh, double* wr, double* wi, double* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_chseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi,\n                           lapack_complex_float* h, lapack_int ldh,\n                           lapack_complex_float* w, lapack_complex_float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_zhseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi,\n                           lapack_complex_double* h, lapack_int ldh,\n                           lapack_complex_double* w, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_clacgv( lapack_int n, lapack_complex_float* x,\n                           lapack_int incx );\nlapack_int LAPACKE_zlacgv( lapack_int n, lapack_complex_double* x,\n                           lapack_int incx );\n\nlapack_int LAPACKE_slacn2( lapack_int n, float* v, float* x, lapack_int* isgn,\n                           float* est, lapack_int* kase, lapack_int* isave );\nlapack_int LAPACKE_dlacn2( lapack_int n, double* v, double* x, lapack_int* isgn,\n                           double* est, lapack_int* kase, lapack_int* isave );\nlapack_int LAPACKE_clacn2( lapack_int n, lapack_complex_float* v,\n                           
lapack_complex_float* x,\n                           float* est, lapack_int* kase, lapack_int* isave );\nlapack_int LAPACKE_zlacn2( lapack_int n, lapack_complex_double* v,\n                           lapack_complex_double* x,\n                           double* est, lapack_int* kase, lapack_int* isave );\n\nlapack_int LAPACKE_slacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const float* a, lapack_int lda, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dlacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const double* a, lapack_int lda, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_clacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zlacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_clacp2( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zlacp2( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_zlag2c( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_float* sa, lapack_int ldsa );\n\nlapack_int LAPACKE_slag2d( int matrix_order, lapack_int m, lapack_int n,\n                           
const float* sa, lapack_int ldsa, double* a,\n                           lapack_int lda );\n\nlapack_int LAPACKE_dlag2s( int matrix_order, lapack_int m, lapack_int n,\n                           const double* a, lapack_int lda, float* sa,\n                           lapack_int ldsa );\n\nlapack_int LAPACKE_clag2z( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_float* sa, lapack_int ldsa,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* d,\n                           float* a, lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_dlagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* d,\n                           double* a, lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_clagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* d,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* iseed );\nlapack_int LAPACKE_zlagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* d,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* iseed );\n\nfloat LAPACKE_slamch( char cmach );\ndouble LAPACKE_dlamch( char cmach );\n\nfloat LAPACKE_slange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const float* a, lapack_int lda );\ndouble LAPACKE_dlange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const double* a, lapack_int lda );\nfloat LAPACKE_clange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const lapack_complex_float* 
a,\n                           lapack_int lda );\ndouble LAPACKE_zlange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda );\n\nfloat LAPACKE_clanhe( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda );\ndouble LAPACKE_zlanhe( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda );\n\nfloat LAPACKE_slansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const float* a, lapack_int lda );\ndouble LAPACKE_dlansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const double* a, lapack_int lda );\nfloat LAPACKE_clansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda );\ndouble LAPACKE_zlansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda );\n\nfloat LAPACKE_slantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const float* a,\n                           lapack_int lda );\ndouble LAPACKE_dlantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const double* a,\n                           lapack_int lda );\nfloat LAPACKE_clantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda );\ndouble LAPACKE_zlantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda );\n\n\nlapack_int LAPACKE_slarfb( int matrix_order, char 
side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const float* v, lapack_int ldv,\n                           const float* t, lapack_int ldt, float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_dlarfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const double* v, lapack_int ldv,\n                           const double* t, lapack_int ldt, double* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_clarfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const lapack_complex_float* v,\n                           lapack_int ldv, const lapack_complex_float* t,\n                           lapack_int ldt, lapack_complex_float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_zlarfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const lapack_complex_double* v,\n                           lapack_int ldv, const lapack_complex_double* t,\n                           lapack_int ldt, lapack_complex_double* c,\n                           lapack_int ldc );\n\nlapack_int LAPACKE_slarfg( lapack_int n, float* alpha, float* x,\n                           lapack_int incx, float* tau );\nlapack_int LAPACKE_dlarfg( lapack_int n, double* alpha, double* x,\n                           lapack_int incx, double* tau );\nlapack_int LAPACKE_clarfg( lapack_int n, lapack_complex_float* alpha,\n                           lapack_complex_float* x, lapack_int incx,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zlarfg( lapack_int n, lapack_complex_double* alpha,\n         
                  lapack_complex_double* x, lapack_int incx,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_slarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k, const float* v,\n                           lapack_int ldv, const float* tau, float* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_dlarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k, const double* v,\n                           lapack_int ldv, const double* tau, double* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_clarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k,\n                           const lapack_complex_float* v, lapack_int ldv,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zlarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k,\n                           const lapack_complex_double* v, lapack_int ldv,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_slarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const float* v, float tau, float* c,\n                           lapack_int ldc, float* work );\nlapack_int LAPACKE_dlarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const double* v, double tau, double* c,\n                           lapack_int ldc, double* work );\nlapack_int LAPACKE_clarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const lapack_complex_float* v,\n                           lapack_complex_float tau, lapack_complex_float* c,\n                           
lapack_int ldc, lapack_complex_float* work );\nlapack_int LAPACKE_zlarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const lapack_complex_double* v,\n                           lapack_complex_double tau, lapack_complex_double* c,\n                           lapack_int ldc, lapack_complex_double* work );\n\nlapack_int LAPACKE_slarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           float* x );\nlapack_int LAPACKE_dlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           double* x );\nlapack_int LAPACKE_clarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           lapack_complex_float* x );\nlapack_int LAPACKE_zlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           lapack_complex_double* x );\n\nlapack_int LAPACKE_slaset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, float alpha, float beta, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dlaset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, double alpha, double beta, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_claset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, lapack_complex_float alpha,\n                           lapack_complex_float beta, lapack_complex_float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_zlaset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, lapack_complex_double alpha,\n                           lapack_complex_double beta, lapack_complex_double* a,\n                           lapack_int lda );\n\nlapack_int LAPACKE_slasrt( char id, lapack_int n, float* d );\nlapack_int LAPACKE_dlasrt( char id, lapack_int n, double* d );\n\nlapack_int LAPACKE_slaswp( int matrix_order, lapack_int n, float* a,\n           
                lapack_int lda, lapack_int k1, lapack_int k2,\n                           const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_dlaswp( int matrix_order, lapack_int n, double* a,\n                           lapack_int lda, lapack_int k1, lapack_int k2,\n                           const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_claswp( int matrix_order, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int k1, lapack_int k2, const lapack_int* ipiv,\n                           lapack_int incx );\nlapack_int LAPACKE_zlaswp( int matrix_order, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int k1, lapack_int k2, const lapack_int* ipiv,\n                           lapack_int incx );\n\nlapack_int LAPACKE_slatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, float* d,\n                           lapack_int mode, float cond, float dmax,\n                           lapack_int kl, lapack_int ku, char pack, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dlatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, double* d,\n                           lapack_int mode, double cond, double dmax,\n                           lapack_int kl, lapack_int ku, char pack, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_clatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, float* d,\n                           lapack_int mode, float cond, float dmax,\n                           lapack_int kl, lapack_int ku, char pack,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlatms( int matrix_order, lapack_int m, lapack_int 
n,\n                           char dist, lapack_int* iseed, char sym, double* d,\n                           lapack_int mode, double cond, double dmax,\n                           lapack_int kl, lapack_int ku, char pack,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slauum( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dlauum( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_clauum( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlauum( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_sopgtr( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, const float* tau, float* q,\n                           lapack_int ldq );\nlapack_int LAPACKE_dopgtr( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, const double* tau, double* q,\n                           lapack_int ldq );\n\nlapack_int LAPACKE_sopmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const float* ap,\n                           const float* tau, float* c, lapack_int ldc );\nlapack_int LAPACKE_dopmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const double* ap,\n                           const double* tau, double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sorgbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, 
lapack_int k, double* a,\n                           lapack_int lda, const double* tau );\n\nlapack_int LAPACKE_sorghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           
const double* tau );\n\nlapack_int LAPACKE_sorgtr( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, const float* tau );\nlapack_int LAPACKE_dorgtr( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, const double* tau );\n\nlapack_int LAPACKE_sormbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const float* a, lapack_int lda,\n                           const float* tau, float* c, lapack_int ldc );\nlapack_int LAPACKE_dormhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const double* a, lapack_int lda,\n                           const double* tau, double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           
double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormqr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormqr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormrq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormrq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormrz( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           lapack_int l, const float* a, lapack_int lda,\n                           const float* tau, float* c, lapack_int ldc );\nlapack_int LAPACKE_dormrz( 
int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           lapack_int l, const double* a, lapack_int lda,\n                           const double* tau, double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const float* a,\n                           lapack_int lda, const float* tau, float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_dormtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const double* a,\n                           lapack_int lda, const double* tau, double* c,\n                           lapack_int ldc );\n\nlapack_int LAPACKE_spbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const float* ab, lapack_int ldab,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_dpbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const double* ab, lapack_int ldab,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cpbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_float* ab,\n                           lapack_int ldab, float anorm, float* rcond );\nlapack_int LAPACKE_zpbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_double* ab,\n                           lapack_int ldab, double anorm, double* rcond );\n\nlapack_int LAPACKE_spbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const float* ab, lapack_int ldab,\n                           float* s, float* scond, float* amax );\nlapack_int LAPACKE_dpbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, 
const double* ab, lapack_int ldab,\n                           double* s, double* scond, double* amax );\nlapack_int LAPACKE_cpbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_float* ab,\n                           lapack_int ldab, float* s, float* scond,\n                           float* amax );\nlapack_int LAPACKE_zpbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_double* ab,\n                           lapack_int ldab, double* s, double* scond,\n                           double* amax );\n\nlapack_int LAPACKE_spbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const float* ab,\n                           lapack_int ldab, const float* afb, lapack_int ldafb,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dpbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const double* ab,\n                           lapack_int ldab, const double* afb, lapack_int ldafb,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cpbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* afb, lapack_int ldafb,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zpbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n      
                     const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* afb, lapack_int ldafb,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_spbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, float* bb, lapack_int ldbb );\nlapack_int LAPACKE_dpbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, double* bb, lapack_int ldbb );\nlapack_int LAPACKE_cpbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, lapack_complex_float* bb,\n                           lapack_int ldbb );\nlapack_int LAPACKE_zpbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, lapack_complex_double* bb,\n                           lapack_int ldbb );\n\nlapack_int LAPACKE_spbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs, float* ab,\n                          lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs, double* ab,\n                          lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs,\n                          lapack_complex_float* ab, lapack_int ldab,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs,\n                          lapack_complex_double* ab, lapack_int ldab,\n                          lapack_complex_double* b, lapack_int ldb 
);\n\nlapack_int LAPACKE_spbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, float* ab,\n                           lapack_int ldab, float* afb, lapack_int ldafb,\n                           char* equed, float* s, float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dpbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, double* ab,\n                           lapack_int ldab, double* afb, lapack_int ldafb,\n                           char* equed, double* s, double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cpbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* afb, lapack_int ldafb,\n                           char* equed, float* s, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zpbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* afb, lapack_int ldafb,\n                           char* equed, double* s, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, double* rcond, double* ferr,\n                           double* berr );\n\nlapack_int LAPACKE_spbtrf( 
int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, float* ab, lapack_int ldab );\nlapack_int LAPACKE_dpbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, double* ab, lapack_int ldab );\nlapack_int LAPACKE_cpbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_float* ab,\n                           lapack_int ldab );\nlapack_int LAPACKE_zpbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_double* ab,\n                           lapack_int ldab );\n\nlapack_int LAPACKE_spbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const float* ab,\n                           lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const double* ab,\n                           lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, float* a );\nlapack_int LAPACKE_dpftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, double* a );\nlapack_int LAPACKE_cpftrf( int matrix_order, char transr, char uplo,\n                           
lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, float* a );\nlapack_int LAPACKE_dpftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, double* a );\nlapack_int LAPACKE_cpftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs, const float* a,\n                           float* b, lapack_int ldb );\nlapack_int LAPACKE_dpftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs, const double* a,\n                           double* b, lapack_int ldb );\nlapack_int LAPACKE_cpftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* a,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* a,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spocon( int matrix_order, char uplo, lapack_int n,\n                           const float* a, lapack_int lda, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dpocon( int matrix_order, char uplo, lapack_int n,\n                           const double* a, lapack_int lda, double anorm,\n                          
 double* rcond );\nlapack_int LAPACKE_cpocon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_zpocon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           double anorm, double* rcond );\n\nlapack_int LAPACKE_spoequ( int matrix_order, lapack_int n, const float* a,\n                           lapack_int lda, float* s, float* scond,\n                           float* amax );\nlapack_int LAPACKE_dpoequ( int matrix_order, lapack_int n, const double* a,\n                           lapack_int lda, double* s, double* scond,\n                           double* amax );\nlapack_int LAPACKE_cpoequ( int matrix_order, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequ( int matrix_order, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_spoequb( int matrix_order, lapack_int n, const float* a,\n                            lapack_int lda, float* s, float* scond,\n                            float* amax );\nlapack_int LAPACKE_dpoequb( int matrix_order, lapack_int n, const double* a,\n                            lapack_int lda, double* s, double* scond,\n                            double* amax );\nlapack_int LAPACKE_cpoequb( int matrix_order, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequb( int matrix_order, lapack_int n,\n                            const lapack_complex_double* a, lapack_int lda,\n                            double* s, double* scond, 
double* amax );\n\nlapack_int LAPACKE_sporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const float* af, lapack_int ldaf, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const double* af, lapack_int ldaf, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_zporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, double* ferr, double* berr );\n\nlapack_int LAPACKE_sporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const float* a,\n                            lapack_int lda, const float* af, lapack_int ldaf,\n                            const float* s, const float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond, float* 
berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const double* a,\n                            lapack_int lda, const double* af, lapack_int ldaf,\n                            const double* s, const double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const float* s, const lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const double* s, const lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* x,\n                            lapack_int ldx, double* rcond, double* berr,\n               
             lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_sposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* a, lapack_int lda, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* a, lapack_int lda, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* a,\n                          lapack_int lda, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dsposv( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* x, lapack_int ldx,\n                           lapack_int* iter );\nlapack_int LAPACKE_zcposv( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, lapack_int* iter );\n\nlapack_int LAPACKE_sposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* af,\n                           lapack_int ldaf, char* equed, float* s, float* b,\n                           lapack_int ldb, float* x, 
lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_dposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* af, lapack_int ldaf, char* equed, double* s,\n                           double* b, lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\nlapack_int LAPACKE_cposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* af,\n                           lapack_int ldaf, char* equed, float* s,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* af,\n                           lapack_int ldaf, char* equed, double* s,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_sposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, float* a,\n                            lapack_int lda, float* af, lapack_int ldaf,\n                            char* equed, float* s, float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond,\n                            float* rpvgrw, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n     
                       lapack_int nparams, float* params );\nlapack_int LAPACKE_dposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, double* a,\n                            lapack_int lda, double* af, lapack_int ldaf,\n                            char* equed, double* s, double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* rpvgrw, double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* af, lapack_int ldaf,\n                            char* equed, float* s, lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* rpvgrw,\n                            float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            char* equed, double* s, lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* x,\n                            lapack_int ldx, double* rcond, double* rpvgrw,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n              
              lapack_int nparams, double* params );\n\nlapack_int LAPACKE_spotrf( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dpotrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_cpotrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotri( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dpotri( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_cpotri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           float* b, lapack_int ldb );\nlapack_int LAPACKE_dpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           double* b, lapack_int ldb );\nlapack_int LAPACKE_cpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int 
lda, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_sppcon( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, float anorm, float* rcond );\nlapack_int LAPACKE_dppcon( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, double anorm, double* rcond );\nlapack_int LAPACKE_cppcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_zppcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sppequ( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, float* s, float* scond,\n                           float* amax );\nlapack_int LAPACKE_dppequ( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, double* s, double* scond,\n                           double* amax );\nlapack_int LAPACKE_cppequ( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap, float* s,\n                           float* scond, float* amax );\nlapack_int LAPACKE_zppequ( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap, double* s,\n                           double* scond, double* amax );\n\nlapack_int LAPACKE_spprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, const float* afp,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dpprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, const double* afp,\n                           
const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cpprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_complex_float* afp,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zpprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_complex_double* afp,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* ap, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* ap, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* ap,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* ap,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, float* ap, float* afp, char* equed,\n                           float* s, float* b, lapack_int ldb, float* x,\n                           
lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, double* ap, double* afp,\n                           char* equed, double* s, double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* ap,\n                           lapack_complex_float* afp, char* equed, float* s,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* ap,\n                           lapack_complex_double* afp, char* equed, double* s,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_spptrf( int matrix_order, char uplo, lapack_int n,\n                           float* ap );\nlapack_int LAPACKE_dpptrf( int matrix_order, char uplo, lapack_int n,\n                           double* ap );\nlapack_int LAPACKE_cpptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_zpptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptri( int matrix_order, char uplo, lapack_int n,\n                           float* ap );\nlapack_int LAPACKE_dpptri( int matrix_order, char uplo, lapack_int n,\n 
                          double* ap );\nlapack_int LAPACKE_cpptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_zpptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dpptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_cpptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spstrf( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, lapack_int* piv, lapack_int* rank,\n                           float tol );\nlapack_int LAPACKE_dpstrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, lapack_int* piv, lapack_int* rank,\n                           double tol );\nlapack_int LAPACKE_cpstrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* piv, lapack_int* rank, float tol );\nlapack_int LAPACKE_zpstrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* piv, lapack_int* rank, double tol );\n\nlapack_int LAPACKE_sptcon( lapack_int 
n, const float* d, const float* e,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_dptcon( lapack_int n, const double* d, const double* e,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cptcon( lapack_int n, const float* d,\n                           const lapack_complex_float* e, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_zptcon( lapack_int n, const double* d,\n                           const lapack_complex_double* e, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_spteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dpteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, double* z, lapack_int ldz );\nlapack_int LAPACKE_cpteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zpteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_sptrfs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const float* d, const float* e, const float* df,\n                           const float* ef, const float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dptrfs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const double* d, const double* e, const double* df,\n                           const double* ef, const double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* ferr,\n                           double* berr );\nlapack_int LAPACKE_cptrfs( int matrix_order, char uplo, lapack_int n,\n                           
lapack_int nrhs, const float* d,\n                           const lapack_complex_float* e, const float* df,\n                           const lapack_complex_float* ef,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zptrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* d,\n                           const lapack_complex_double* e, const double* df,\n                           const lapack_complex_double* ef,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* d, float* e, float* b, lapack_int ldb );\nlapack_int LAPACKE_dptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* d, double* e, double* b, lapack_int ldb );\nlapack_int LAPACKE_cptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* d, lapack_complex_float* e,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* d, lapack_complex_double* e,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const float* d, const float* e,\n                           float* df, float* ef, const float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dptsvx( int matrix_order, char fact, lapack_int n,\n 
                          lapack_int nrhs, const double* d, const double* e,\n                           double* df, double* ef, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\nlapack_int LAPACKE_cptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const float* d,\n                           const lapack_complex_float* e, float* df,\n                           lapack_complex_float* ef,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const double* d,\n                           const lapack_complex_double* e, double* df,\n                           lapack_complex_double* ef,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_spttrf( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dpttrf( lapack_int n, double* d, double* e );\nlapack_int LAPACKE_cpttrf( lapack_int n, float* d, lapack_complex_float* e );\nlapack_int LAPACKE_zpttrf( lapack_int n, double* d, lapack_complex_double* e );\n\nlapack_int LAPACKE_spttrs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const float* d, const float* e, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dpttrs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const double* d, const double* e, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_cpttrs( int matrix_order, char uplo, lapack_int n,\n      
                     lapack_int nrhs, const float* d,\n                           const lapack_complex_float* e,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpttrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* d,\n                           const lapack_complex_double* e,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, float* ab, lapack_int ldab, float* w,\n                          float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, double* ab, lapack_int ldab, double* w,\n                          double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, float* ab, lapack_int ldab, float* w,\n                           float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, double* ab, lapack_int ldab,\n                           double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd, float* ab,\n                           lapack_int ldab, float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd, double* ab,\n                           lapack_int ldab, double* q, lapack_int ldq,\n        
                   double vl, double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, float* ab,\n                           lapack_int ldab, const float* bb, lapack_int ldbb,\n                           float* x, lapack_int ldx );\nlapack_int LAPACKE_dsbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, double* ab,\n                           lapack_int ldab, const double* bb, lapack_int ldbb,\n                           double* x, lapack_int ldx );\n\nlapack_int LAPACKE_ssbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb, float* ab,\n                          lapack_int ldab, float* bb, lapack_int ldbb, float* w,\n                          float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb, double* ab,\n                          lapack_int ldab, double* bb, lapack_int ldbb,\n                          double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, float* ab,\n                           lapack_int ldab, float* bb, lapack_int ldbb,\n                           float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, double* ab,\n                           lapack_int ldab, double* bb, lapack_int ldbb,\n                           double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbgvx( int 
matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n                           float* ab, lapack_int ldab, float* bb,\n                           lapack_int ldbb, float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n                           double* ab, lapack_int ldab, double* bb,\n                           lapack_int ldbb, double* q, lapack_int ldq,\n                           double vl, double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, float* ab, lapack_int ldab, float* d,\n                           float* e, float* q, lapack_int ldq );\nlapack_int LAPACKE_dsbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, double* ab, lapack_int ldab,\n                           double* d, double* e, double* q, lapack_int ldq );\n\nlapack_int LAPACKE_ssfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, float alpha,\n                          const float* a, lapack_int lda, float beta,\n                          float* c );\nlapack_int LAPACKE_dsfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, double alpha,\n                          const double* a, lapack_int lda, double beta,\n                          double* c );\n\nlapack_int 
LAPACKE_sspcon( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, const lapack_int* ipiv, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dspcon( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, const lapack_int* ipiv,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cspcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zspcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sspev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          float* ap, float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          double* ap, double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           float* ap, float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           double* ap, double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, float* ap, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dspevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, double* ap, double vl, double vu,\n      
                     lapack_int il, lapack_int iu, double abstol,\n                           lapack_int* m, double* w, double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_sspgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, float* ap, const float* bp );\nlapack_int LAPACKE_dspgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, double* ap, const double* bp );\n\nlapack_int LAPACKE_sspgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, float* ap, float* bp,\n                          float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, double* ap, double* bp,\n                          double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, float* ap, float* bp,\n                           float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, double* ap, double* bp,\n                           double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, float* ap,\n                           float* bp, float vl, float vu, lapack_int il,\n                           lapack_int iu, float abstol, lapack_int* m, float* w,\n                           float* z, lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_dspgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, double* ap,\n                           double* bp, double vl, double vu, lapack_int il,\n                      
     lapack_int iu, double abstol, lapack_int* m,\n                           double* w, double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_ssprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, const float* afp,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dsprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, const double* afp,\n                           const lapack_int* ipiv, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_csprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_complex_float* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zsprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_complex_double* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* ap, lapack_int* ipiv,\n                          float* b, lapack_int ldb 
);\nlapack_int LAPACKE_dspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* ap, lapack_int* ipiv,\n                          double* b, lapack_int ldb );\nlapack_int LAPACKE_cspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* ap,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* ap,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_sspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, float* afp,\n                           lapack_int* ipiv, const float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, double* afp,\n                           lapack_int* ipiv, const double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           lapack_complex_float* afp, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           
lapack_int nrhs, const lapack_complex_double* ap,\n                           lapack_complex_double* afp, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_ssptrd( int matrix_order, char uplo, lapack_int n, float* ap,\n                           float* d, float* e, float* tau );\nlapack_int LAPACKE_dsptrd( int matrix_order, char uplo, lapack_int n,\n                           double* ap, double* d, double* e, double* tau );\n\nlapack_int LAPACKE_ssptrf( int matrix_order, char uplo, lapack_int n, float* ap,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_dsptrf( int matrix_order, char uplo, lapack_int n,\n                           double* ap, lapack_int* ipiv );\nlapack_int LAPACKE_csptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zsptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_ssptri( int matrix_order, char uplo, lapack_int n, float* ap,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_dsptri( int matrix_order, char uplo, lapack_int n,\n                           double* ap, const lapack_int* ipiv );\nlapack_int LAPACKE_csptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, const lapack_int* ipiv );\nlapack_int LAPACKE_zsptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, const lapack_int* ipiv );\n\nlapack_int LAPACKE_ssptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int 
LAPACKE_dsptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_csptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zsptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_sstebz( char range, char order, lapack_int n, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           const float* d, const float* e, lapack_int* m,\n                           lapack_int* nsplit, float* w, lapack_int* iblock,\n                           lapack_int* isplit );\nlapack_int LAPACKE_dstebz( char range, char order, lapack_int n, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, const double* d, const double* e,\n                           lapack_int* m, lapack_int* nsplit, double* w,\n                           lapack_int* iblock, lapack_int* isplit );\n\nlapack_int LAPACKE_sstedc( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dstedc( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, double* z, lapack_int ldz );\nlapack_int LAPACKE_cstedc( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zstedc( int matrix_order, char compz, 
lapack_int n,\n                           double* d, double* e, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_sstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_dstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* isuppz );\nlapack_int LAPACKE_cstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* isuppz );\nlapack_int LAPACKE_zstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* isuppz );\n\nlapack_int LAPACKE_sstein( int matrix_order, lapack_int n, const float* d,\n                           const float* e, lapack_int m, const float* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           float* z, lapack_int ldz, lapack_int* ifailv );\nlapack_int LAPACKE_dstein( int matrix_order, lapack_int n, const double* 
d,\n                           const double* e, lapack_int m, const double* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           double* z, lapack_int ldz, lapack_int* ifailv );\nlapack_int LAPACKE_cstein( int matrix_order, lapack_int n, const float* d,\n                           const float* e, lapack_int m, const float* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* ifailv );\nlapack_int LAPACKE_zstein( int matrix_order, lapack_int n, const double* d,\n                           const double* e, lapack_int m, const double* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifailv );\n\nlapack_int LAPACKE_sstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, lapack_int* m,\n                           float* w, float* z, lapack_int ldz, lapack_int nzc,\n                           lapack_int* isuppz, lapack_logical* tryrac );\nlapack_int LAPACKE_dstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           lapack_int* m, double* w, double* z, lapack_int ldz,\n                           lapack_int nzc, lapack_int* isuppz,\n                           lapack_logical* tryrac );\nlapack_int LAPACKE_cstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, lapack_int* m,\n                           float* w, 
lapack_complex_float* z, lapack_int ldz,\n                           lapack_int nzc, lapack_int* isuppz,\n                           lapack_logical* tryrac );\nlapack_int LAPACKE_zstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           lapack_int* m, double* w, lapack_complex_double* z,\n                           lapack_int ldz, lapack_int nzc, lapack_int* isuppz,\n                           lapack_logical* tryrac );\n\nlapack_int LAPACKE_ssteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dsteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, double* z, lapack_int ldz );\nlapack_int LAPACKE_csteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zsteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_ssterf( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dsterf( lapack_int n, double* d, double* e );\n\nlapack_int LAPACKE_sstev( int matrix_order, char jobz, lapack_int n, float* d,\n                          float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dstev( int matrix_order, char jobz, lapack_int n, double* d,\n                          double* e, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sstevd( int matrix_order, char jobz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dstevd( int matrix_order, char jobz, lapack_int n, double* d,\n                           double* e, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sstevr( 
int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_dstevr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* isuppz );\n\nlapack_int LAPACKE_sstevx( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dstevx( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssycon( int matrix_order, char uplo, lapack_int n,\n                           const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_dsycon( int matrix_order, char uplo, lapack_int n,\n                           const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_csycon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n  
                         const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zsycon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_ssyequb( int matrix_order, char uplo, lapack_int n,\n                            const float* a, lapack_int lda, float* s,\n                            float* scond, float* amax );\nlapack_int LAPACKE_dsyequb( int matrix_order, char uplo, lapack_int n,\n                            const double* a, lapack_int lda, double* s,\n                            double* scond, double* amax );\nlapack_int LAPACKE_csyequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zsyequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_double* a, lapack_int lda,\n                            double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_ssyev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_dsyev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          double* a, lapack_int lda, double* w );\n\nlapack_int LAPACKE_ssyevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_dsyevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           double* a, lapack_int lda, double* w );\n\nlapack_int LAPACKE_ssyevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, float* a, lapack_int lda, float vl,\n                           float vu, lapack_int il, lapack_int 
iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_dsyevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, double* a, lapack_int lda, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* isuppz );\n\nlapack_int LAPACKE_ssyevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, float* a, lapack_int lda, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsyevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, double* a, lapack_int lda, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssygst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, float* a, lapack_int lda,\n                           const float* b, lapack_int ldb );\nlapack_int LAPACKE_dsygst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, double* a, lapack_int lda,\n                           const double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssygv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, float* a, lapack_int lda,\n                          float* b, lapack_int ldb, float* w );\nlapack_int LAPACKE_dsygv( int matrix_order, lapack_int itype, char jobz,\n                          
char uplo, lapack_int n, double* a, lapack_int lda,\n                          double* b, lapack_int ldb, double* w );\n\nlapack_int LAPACKE_ssygvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, float* a, lapack_int lda,\n                           float* b, lapack_int ldb, float* w );\nlapack_int LAPACKE_dsygvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* w );\n\nlapack_int LAPACKE_ssygvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsygvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const float* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dsyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, 
lapack_int lda,\n                           const double* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_csyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zsyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_ssyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const float* a,\n                            lapack_int lda, const float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* s,\n                            const float* b, lapack_int ldb, float* x,\n                            lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dsyrfsx( int matrix_order, char uplo, char equed,\n                            
lapack_int n, lapack_int nrhs, const double* a,\n                            lapack_int lda, const double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* s,\n                            const double* b, lapack_int ldb, double* x,\n                            lapack_int ldx, double* rcond, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\nlapack_int LAPACKE_csyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* s,\n                            const lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zsyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* s,\n                            const lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int 
LAPACKE_ssysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* a, lapack_int lda,\n                          lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dsysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* a, lapack_int lda,\n                          lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_csysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zsysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           float* af, lapack_int ldaf, lapack_int* ipiv,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dsysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           double* af, lapack_int ldaf, lapack_int* ipiv,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* rcond, double* ferr,\n                           double* berr );\nlapack_int LAPACKE_csysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int 
lda, lapack_complex_float* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zsysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_ssysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, float* a,\n                            lapack_int lda, float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* s, float* b,\n                            lapack_int ldb, float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dsysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, double* a,\n                            lapack_int lda, double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* s, double* b,\n                            lapack_int ldb, double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            
double* err_bnds_comp, lapack_int nparams,\n                            double* params );\nlapack_int LAPACKE_csysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* s,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zsysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* s,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_ssytrd( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, float* d, float* e, float* tau );\nlapack_int LAPACKE_dsytrd( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, double* d, double* e, double* tau );\n\nlapack_int LAPACKE_ssytrf( int matrix_order, char uplo, lapack_int n, float* a,\n     
                      lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dsytrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_csytrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zsytrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_ssytri( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dsytri( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_csytri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_zsytri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv );\n\nlapack_int LAPACKE_ssytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dsytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_csytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_float* 
b, lapack_int ldb );\nlapack_int LAPACKE_zsytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd, const float* ab,\n                           lapack_int ldab, float* rcond );\nlapack_int LAPACKE_dtbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd, const double* ab,\n                           lapack_int ldab, double* rcond );\nlapack_int LAPACKE_ctbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           float* rcond );\nlapack_int LAPACKE_ztbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           double* rcond );\n\nlapack_int LAPACKE_stbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const float* ab, lapack_int ldab, const float* b,\n                           lapack_int ldb, const float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dtbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const double* ab, lapack_int ldab, const double* b,\n                           lapack_int ldb, const double* x, lapack_int ldx,\n                           double* ferr, double* berr 
);\nlapack_int LAPACKE_ctbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_ztbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_stbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const float* ab, lapack_int ldab, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dtbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const double* ab, lapack_int ldab, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_ctbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           
lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          float alpha, const float* a, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dtfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          double alpha, const double* a, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_ctfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          lapack_complex_float alpha,\n                          const lapack_complex_float* a,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          lapack_complex_double alpha,\n                          const lapack_complex_double* a,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, float* a );\nlapack_int LAPACKE_dtftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, double* a );\nlapack_int LAPACKE_ctftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_ztftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_stfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* arf, float* ap 
);\nlapack_int LAPACKE_dtfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* arf, double* ap );\nlapack_int LAPACKE_ctfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* arf,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_ztfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* arf,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_stfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* arf, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dtfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* arf, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_ctfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* arf,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* arf,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_stgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const float* s, lapack_int lds, const float* p,\n                           lapack_int ldp, float* vl, lapack_int ldvl,\n                           float* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_dtgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const double* s, lapack_int lds, const double* p,\n         
                  lapack_int ldp, double* vl, lapack_int ldvl,\n                           double* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_ctgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* s, lapack_int lds,\n                           const lapack_complex_float* p, lapack_int ldp,\n                           lapack_complex_float* vl, lapack_int ldvl,\n                           lapack_complex_float* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* s, lapack_int lds,\n                           const lapack_complex_double* p, lapack_int ldp,\n                           lapack_complex_double* vl, lapack_int ldvl,\n                           lapack_complex_double* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\n\nlapack_int LAPACKE_stgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb, float* q,\n                           lapack_int ldq, float* z, lapack_int ldz,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_dtgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb, double* q,\n                           lapack_int ldq, double* z, lapack_int ldz,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_ctgexc( int matrix_order, lapack_logical wantq,\n                           
lapack_logical wantz, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_stgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           float* alphar, float* alphai, float* beta, float* q,\n                           lapack_int ldq, float* z, lapack_int ldz,\n                           lapack_int* m, float* pl, float* pr, float* dif );\nlapack_int LAPACKE_dtgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n,\n                           double* a, lapack_int lda, double* b, lapack_int ldb,\n                           double* alphar, double* alphai, double* beta,\n                           double* q, lapack_int ldq, double* z, lapack_int ldz,\n                           lapack_int* m, double* pl, double* pr, double* dif );\nlapack_int LAPACKE_ctgsen( int matrix_order, lapack_int ijob,\n                           
lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta, lapack_complex_float* q,\n                           lapack_int ldq, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* m, float* pl, float* pr,\n                           float* dif );\nlapack_int LAPACKE_ztgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* m, double* pl, double* pr, double* dif );\n\nlapack_int LAPACKE_stgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, float* a, lapack_int lda,\n                           float* b, lapack_int ldb, float tola, float tolb,\n                           float* alpha, float* beta, float* u, lapack_int ldu,\n                           float* v, lapack_int ldv, float* q, lapack_int ldq,\n                           lapack_int* ncycle );\nlapack_int LAPACKE_dtgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, 
double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double tola, double tolb, double* alpha,\n                           double* beta, double* u, lapack_int ldu, double* v,\n                           lapack_int ldv, double* q, lapack_int ldq,\n                           lapack_int* ncycle );\nlapack_int LAPACKE_ctgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float tola, float tolb, float* alpha,\n                           float* beta, lapack_complex_float* u, lapack_int ldu,\n                           lapack_complex_float* v, lapack_int ldv,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_int* ncycle );\nlapack_int LAPACKE_ztgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double tola, double tolb,\n                           double* alpha, double* beta,\n                           lapack_complex_double* u, lapack_int ldu,\n                           lapack_complex_double* v, lapack_int ldv,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_int* ncycle );\n\nlapack_int LAPACKE_stgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const float* a, lapack_int lda, const float* b,\n                           lapack_int ldb, const float* vl, lapack_int ldvl,\n           
                const float* vr, lapack_int ldvr, float* s,\n                           float* dif, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_dtgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const double* a, lapack_int lda, const double* b,\n                           lapack_int ldb, const double* vl, lapack_int ldvl,\n                           const double* vr, lapack_int ldvr, double* s,\n                           double* dif, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ctgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* vl, lapack_int ldvl,\n                           const lapack_complex_float* vr, lapack_int ldvr,\n                           float* s, float* dif, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* vl, lapack_int ldvl,\n                           const lapack_complex_double* vr, lapack_int ldvr,\n                           double* s, double* dif, lapack_int mm,\n                           lapack_int* m );\n\nlapack_int LAPACKE_stgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n, const float* a,\n                           lapack_int lda, const float* b, lapack_int ldb,\n                           float* c, lapack_int ldc, const float* d,\n                           lapack_int 
ldd, const float* e, lapack_int lde,\n                           float* f, lapack_int ldf, float* scale, float* dif );\nlapack_int LAPACKE_dtgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n, const double* a,\n                           lapack_int lda, const double* b, lapack_int ldb,\n                           double* c, lapack_int ldc, const double* d,\n                           lapack_int ldd, const double* e, lapack_int lde,\n                           double* f, lapack_int ldf, double* scale,\n                           double* dif );\nlapack_int LAPACKE_ctgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* c, lapack_int ldc,\n                           const lapack_complex_float* d, lapack_int ldd,\n                           const lapack_complex_float* e, lapack_int lde,\n                           lapack_complex_float* f, lapack_int ldf,\n                           float* scale, float* dif );\nlapack_int LAPACKE_ztgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* c, lapack_int ldc,\n                           const lapack_complex_double* d, lapack_int ldd,\n                           const lapack_complex_double* e, lapack_int lde,\n                           lapack_complex_double* f, lapack_int ldf,\n                           double* scale, double* dif );\n\nlapack_int LAPACKE_stpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const float* 
ap, float* rcond );\nlapack_int LAPACKE_dtpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const double* ap, double* rcond );\nlapack_int LAPACKE_ctpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_float* ap,\n                           float* rcond );\nlapack_int LAPACKE_ztpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_double* ap,\n                           double* rcond );\n\nlapack_int LAPACKE_stprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* ap,\n                           const float* b, lapack_int ldb, const float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dtprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* ap,\n                           const double* b, lapack_int ldb, const double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_ctprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* ap,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_ztprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* ap,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr 
);\n\nlapack_int LAPACKE_stptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           float* ap );\nlapack_int LAPACKE_dtptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           double* ap );\nlapack_int LAPACKE_ctptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_ztptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_stptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* ap,\n                           float* b, lapack_int ldb );\nlapack_int LAPACKE_dtptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* ap,\n                           double* b, lapack_int ldb );\nlapack_int LAPACKE_ctptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* ap,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* ap,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* ap, float* arf );\nlapack_int LAPACKE_dtpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* ap, double* arf );\nlapack_int LAPACKE_ctpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* ap,\n                           lapack_complex_float* arf 
);\nlapack_int LAPACKE_ztpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* ap,\n                           lapack_complex_double* arf );\n\nlapack_int LAPACKE_stpttr( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, float* a, lapack_int lda );\nlapack_int LAPACKE_dtpttr( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, double* a, lapack_int lda );\nlapack_int LAPACKE_ctpttr( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztpttr( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_strcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const float* a, lapack_int lda,\n                           float* rcond );\nlapack_int LAPACKE_dtrcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const double* a, lapack_int lda,\n                           double* rcond );\nlapack_int LAPACKE_ctrcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda, float* rcond );\nlapack_int LAPACKE_ztrcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda, double* rcond );\n\nlapack_int LAPACKE_strevc( int matrix_order, char side, char howmny,\n                           lapack_logical* select, lapack_int n, const float* t,\n                           lapack_int ldt, float* vl, lapack_int ldvl,\n                           float* vr, 
lapack_int ldvr, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_dtrevc( int matrix_order, char side, char howmny,\n                           lapack_logical* select, lapack_int n,\n                           const double* t, lapack_int ldt, double* vl,\n                           lapack_int ldvl, double* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ctrevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* vl, lapack_int ldvl,\n                           lapack_complex_float* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztrevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* vl, lapack_int ldvl,\n                           lapack_complex_double* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\n\nlapack_int LAPACKE_strexc( int matrix_order, char compq, lapack_int n, float* t,\n                           lapack_int ldt, float* q, lapack_int ldq,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_dtrexc( int matrix_order, char compq, lapack_int n,\n                           double* t, lapack_int ldt, double* q, lapack_int ldq,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_ctrexc( int matrix_order, char compq, lapack_int n,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztrexc( int 
matrix_order, char compq, lapack_int n,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_strrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* a,\n                           lapack_int lda, const float* b, lapack_int ldb,\n                           const float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dtrrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* a,\n                           lapack_int lda, const double* b, lapack_int ldb,\n                           const double* x, lapack_int ldx, double* ferr,\n                           double* berr );\nlapack_int LAPACKE_ctrrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_ztrrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_strsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n, float* t,\n                           lapack_int ldt, float* q, 
lapack_int ldq, float* wr,\n                           float* wi, lapack_int* m, float* s, float* sep );\nlapack_int LAPACKE_dtrsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n,\n                           double* t, lapack_int ldt, double* q, lapack_int ldq,\n                           double* wr, double* wi, lapack_int* m, double* s,\n                           double* sep );\nlapack_int LAPACKE_ctrsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* w, lapack_int* m, float* s,\n                           float* sep );\nlapack_int LAPACKE_ztrsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* w, lapack_int* m, double* s,\n                           double* sep );\n\nlapack_int LAPACKE_strsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const float* t, lapack_int ldt, const float* vl,\n                           lapack_int ldvl, const float* vr, lapack_int ldvr,\n                           float* s, float* sep, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_dtrsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const double* t, lapack_int ldt, const double* vl,\n                           lapack_int ldvl, const double* vr, lapack_int ldvr,\n                           double* s, double* sep, lapack_int mm,\n  
                         lapack_int* m );\nlapack_int LAPACKE_ctrsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* t, lapack_int ldt,\n                           const lapack_complex_float* vl, lapack_int ldvl,\n                           const lapack_complex_float* vr, lapack_int ldvr,\n                           float* s, float* sep, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztrsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* t, lapack_int ldt,\n                           const lapack_complex_double* vl, lapack_int ldvl,\n                           const lapack_complex_double* vr, lapack_int ldvr,\n                           double* s, double* sep, lapack_int mm,\n                           lapack_int* m );\n\nlapack_int LAPACKE_strsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const float* a, lapack_int lda, const float* b,\n                           lapack_int ldb, float* c, lapack_int ldc,\n                           float* scale );\nlapack_int LAPACKE_dtrsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const double* a, lapack_int lda, const double* b,\n                           lapack_int ldb, double* c, lapack_int ldc,\n                           double* scale );\nlapack_int LAPACKE_ctrsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* c, 
lapack_int ldc,\n                           float* scale );\nlapack_int LAPACKE_ztrsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* c, lapack_int ldc,\n                           double* scale );\n\nlapack_int LAPACKE_strtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           float* a, lapack_int lda );\nlapack_int LAPACKE_dtrtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           double* a, lapack_int lda );\nlapack_int LAPACKE_ctrtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztrtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_strtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* a,\n                           lapack_int lda, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtrtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* a,\n                           lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctrtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztrtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* a, 
lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_strttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* a, lapack_int lda,\n                           float* arf );\nlapack_int LAPACKE_dtrttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* a, lapack_int lda,\n                           double* arf );\nlapack_int LAPACKE_ctrttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* arf );\nlapack_int LAPACKE_ztrttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* arf );\n\nlapack_int LAPACKE_strttp( int matrix_order, char uplo, lapack_int n,\n                           const float* a, lapack_int lda, float* ap );\nlapack_int LAPACKE_dtrttp( int matrix_order, char uplo, lapack_int n,\n                           const double* a, lapack_int lda, double* ap );\nlapack_int LAPACKE_ctrttp( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_ztrttp( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_stzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dtzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_ctzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           
lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_ztzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cunghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zunghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cunglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zunglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, 
lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungtr( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau );\nlapack_int LAPACKE_zungtr( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cunmbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n          
                 const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmql( int matrix_order, char side, char trans,\n                           
lapack_int m, lapack_int n, lapack_int k,
                           const lapack_complex_double* a, lapack_int lda,
                           const lapack_complex_double* tau,
                           lapack_complex_double* c, lapack_int ldc );

/* ?unmqr: multiply a complex matrix C by the unitary matrix Q of a QR
 * factorization (see LAPACK cunmqr/zunmqr). */
lapack_int LAPACKE_cunmqr( int matrix_order, char side, char trans,
                           lapack_int m, lapack_int n, lapack_int k,
                           const lapack_complex_float* a, lapack_int lda,
                           const lapack_complex_float* tau,
                           lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmqr( int matrix_order, char side, char trans,
                           lapack_int m, lapack_int n, lapack_int k,
                           const lapack_complex_double* a, lapack_int lda,
                           const lapack_complex_double* tau,
                           lapack_complex_double* c, lapack_int ldc );

/* ?unmrq: multiply C by the unitary matrix Q of an RQ factorization. */
lapack_int LAPACKE_cunmrq( int matrix_order, char side, char trans,
                           lapack_int m, lapack_int n, lapack_int k,
                           const lapack_complex_float* a, lapack_int lda,
                           const lapack_complex_float* tau,
                           lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmrq( int matrix_order, char side, char trans,
                           lapack_int m, lapack_int n, lapack_int k,
                           const lapack_complex_double* a, lapack_int lda,
                           const lapack_complex_double* tau,
                           lapack_complex_double* c, lapack_int ldc );

/* ?unmrz: multiply C by the unitary matrix Q of an RZ factorization
 * (extra parameter l: see LAPACK cunmrz/zunmrz). */
lapack_int LAPACKE_cunmrz( int matrix_order, char side, char trans,
                           lapack_int m, lapack_int n, lapack_int k,
                           lapack_int l, const lapack_complex_float* a,
                           lapack_int lda, const lapack_complex_float* tau,
                           lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmrz( int matrix_order, char side, char trans,
                           lapack_int m, lapack_int n, lapack_int k,
                           lapack_int l, const lapack_complex_double* a,
                           lapack_int lda, const lapack_complex_double* tau,
                           lapack_complex_double* c, lapack_int ldc );

/* ?unmtr: multiply C by the unitary matrix Q from the tridiagonal
 * reduction of a Hermitian matrix. */
lapack_int LAPACKE_cunmtr( int matrix_order, char side, char uplo, char trans,
                           lapack_int m, lapack_int n,
                           const lapack_complex_float* a, lapack_int lda,
                           const lapack_complex_float* tau,
                           lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmtr( int matrix_order, char side, char uplo, char trans,
                           lapack_int m, lapack_int n,
                           const lapack_complex_double* a, lapack_int lda,
                           const lapack_complex_double* tau,
                           lapack_complex_double* c, lapack_int ldc );

/* ?upgtr: generate the unitary matrix Q from a packed-storage (ap)
 * tridiagonal reduction. */
lapack_int LAPACKE_cupgtr( int matrix_order, char uplo, lapack_int n,
                           const lapack_complex_float* ap,
                           const lapack_complex_float* tau,
                           lapack_complex_float* q, lapack_int ldq );
lapack_int LAPACKE_zupgtr( int matrix_order, char uplo, lapack_int n,
                           const lapack_complex_double* ap,
                           const lapack_complex_double* tau,
                           lapack_complex_double* q, lapack_int ldq );

/* ?upmtr: multiply C by Q from a packed-storage tridiagonal reduction. */
lapack_int LAPACKE_cupmtr( int matrix_order, char side, char uplo, char trans,
                           lapack_int m, lapack_int n,
                           const lapack_complex_float* ap,
                           const lapack_complex_float* tau,
                           lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zupmtr( int matrix_order, char side, char uplo, char trans,
                           lapack_int m, lapack_int n,
                           const lapack_complex_double* ap,
                           const lapack_complex_double* tau,
                           lapack_complex_double* c, lapack_int ldc );

/* NOTE: from here on, the *_work variants take caller-allocated workspace
 * buffers (work/iwork/rwork) instead of allocating them internally. */

/* ?bdsdc_work: singular values (and optionally vectors) of a real
 * bidiagonal matrix, divide-and-conquer. */
lapack_int LAPACKE_sbdsdc_work( int matrix_order, char uplo, char compq,
                                lapack_int n, float* d, float* e, float* u,
                                lapack_int ldu, float* vt, lapack_int ldvt,
                                float* q, lapack_int* iq, float* work,
                                lapack_int* iwork );
lapack_int LAPACKE_dbdsdc_work( int matrix_order, char uplo, char compq,
                                lapack_int n, double* d, double* e, double* u,
                                lapack_int ldu, double* vt, lapack_int ldvt,
                                double* q, lapack_int* iq, double* work,
                                lapack_int* iwork );

/* ?bdsqr_work: SVD of a bidiagonal matrix via the implicit QR algorithm. */
lapack_int LAPACKE_sbdsqr_work( int matrix_order, char uplo, lapack_int n,
                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
                                float* d, float* e, float* vt, lapack_int ldvt,
                                float* u, lapack_int ldu, float* c,
                                lapack_int ldc, float* work );
lapack_int LAPACKE_dbdsqr_work( int matrix_order, char uplo, lapack_int n,
                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
                                double* d, double* e, double* vt,
                                lapack_int ldvt, double* u, lapack_int ldu,
                                double* c, lapack_int ldc, double* work );
lapack_int LAPACKE_cbdsqr_work( int matrix_order, char uplo, lapack_int n,
                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
                                float* d, float* e, lapack_complex_float* vt,
                                lapack_int ldvt, lapack_complex_float* u,
                                lapack_int ldu, lapack_complex_float* c,
                                lapack_int ldc, float* work );
lapack_int LAPACKE_zbdsqr_work( int matrix_order, char uplo, lapack_int n,
                                lapack_int ncvt, lapack_int nru, lapack_int ncc,
                                double* d, double* e, lapack_complex_double* vt,
                                lapack_int ldvt, lapack_complex_double* u,
                                lapack_int ldu, lapack_complex_double* c,
                                lapack_int ldc, double* work );

/* ?disna_work: reciprocal condition numbers for eigenvector/singular
 * vector problems; no matrix_order (vector inputs only). */
lapack_int LAPACKE_sdisna_work( char job, lapack_int m, lapack_int n,
                                const float* d, float* sep );
lapack_int LAPACKE_ddisna_work( char job, lapack_int m, lapack_int n,
                                const double* d, double* sep );

/* ?gbbrd_work: reduce a general band matrix (ab, bandwidths kl/ku) to
 * bidiagonal form. */
lapack_int LAPACKE_sgbbrd_work( int matrix_order, char vect, lapack_int m,
                                lapack_int n, lapack_int ncc, lapack_int kl,
                                lapack_int ku, float* ab, lapack_int ldab,
                                float* d, float* e, float* q, lapack_int ldq,
                                float* pt, lapack_int ldpt, float* c,
                                lapack_int ldc, float* work );
lapack_int LAPACKE_dgbbrd_work( int matrix_order, char vect, lapack_int m,
                                lapack_int n, lapack_int ncc, lapack_int kl,
                                lapack_int ku, double* ab, lapack_int ldab,
                                double* d, double* e, double* q, lapack_int ldq,
                                double* pt, lapack_int ldpt, double* c,
                                lapack_int ldc, double* work );
lapack_int LAPACKE_cgbbrd_work( int matrix_order, char vect, lapack_int m,
                                lapack_int n, lapack_int ncc, lapack_int kl,
                                lapack_int ku, lapack_complex_float* ab,
                                lapack_int ldab, float* d, float* e,
                                lapack_complex_float* q, lapack_int ldq,
                                lapack_complex_float* pt, lapack_int ldpt,
                                lapack_complex_float* c, lapack_int ldc,
                                lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgbbrd_work( int matrix_order, char vect, lapack_int m,
                                lapack_int n, lapack_int ncc, lapack_int kl,
                                lapack_int ku, lapack_complex_double* ab,
                                lapack_int ldab, double* d, double* e,
                                lapack_complex_double* q, lapack_int ldq,
                                lapack_complex_double* pt, lapack_int ldpt,
                                lapack_complex_double* c, lapack_int ldc,
                                lapack_complex_double* work, double* rwork );

/* ?gbcon_work: estimate the reciprocal condition number (rcond) of a
 * general band matrix from its ?gbtrf factorization. */
lapack_int LAPACKE_sgbcon_work( int matrix_order, char norm, lapack_int n,
                                lapack_int kl, lapack_int ku, const float* ab,
                                lapack_int ldab, const lapack_int* ipiv,
                                float anorm, float* rcond, float* work,
                                lapack_int* iwork );
lapack_int LAPACKE_dgbcon_work( int matrix_order, char norm, lapack_int n,
                                lapack_int kl, lapack_int ku, const double* ab,
                                lapack_int ldab, const lapack_int* ipiv,
                                double anorm, double* rcond, double* work,
                                lapack_int* iwork );
lapack_int LAPACKE_cgbcon_work( int matrix_order, char norm, lapack_int n,
                                lapack_int kl, lapack_int ku,
                                const lapack_complex_float* ab, lapack_int ldab,
                                const lapack_int* ipiv, float anorm,
                                float* rcond, lapack_complex_float* work,
                                float* rwork );
lapack_int LAPACKE_zgbcon_work( int matrix_order, char norm, lapack_int n,
                                lapack_int kl, lapack_int ku,
                                const lapack_complex_double* ab,
                                lapack_int ldab, const lapack_int* ipiv,
                                double anorm, double* rcond,
                                lapack_complex_double* work, double* rwork );

/* ?gbequ_work: compute row (r) and column (c) equilibration scalings for
 * a general band matrix. */
lapack_int LAPACKE_sgbequ_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku, const float* ab,
                                lapack_int ldab, float* r, float* c,
                                float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_dgbequ_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku, const double* ab,
                                lapack_int ldab, double* r, double* c,
                                double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_cgbequ_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku,
                                const lapack_complex_float* ab, lapack_int ldab,
                                float* r, float* c, float* rowcnd,
                                float* colcnd, float* amax );
lapack_int LAPACKE_zgbequ_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku,
                                const lapack_complex_double* ab,
                                lapack_int ldab, double* r, double* c,
                                double* rowcnd, double* colcnd, double* amax );

/* ?gbequb_work: like ?gbequ but scalings restricted to powers of the radix. */
lapack_int LAPACKE_sgbequb_work( int matrix_order, lapack_int m, lapack_int n,
                                 lapack_int kl, lapack_int ku, const float* ab,
                                 lapack_int ldab, float* r, float* c,
                                 float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_dgbequb_work( int matrix_order, lapack_int m, lapack_int n,
                                 lapack_int kl, lapack_int ku, const double* ab,
                                 lapack_int ldab, double* r, double* c,
                                 double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_cgbequb_work( int matrix_order, lapack_int m, lapack_int n,
                                 lapack_int kl, lapack_int ku,
                                 const lapack_complex_float* ab,
                                 lapack_int ldab, float* r, float* c,
                                 float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_zgbequb_work( int matrix_order, lapack_int m, lapack_int n,
                                 lapack_int kl, lapack_int ku,
                                 const lapack_complex_double* ab,
                                 lapack_int ldab, double* r, double* c,
                                 double* rowcnd, double* colcnd, double* amax );

/* ?gbrfs_work: iterative refinement and error bounds (ferr/berr) for a
 * band-matrix linear system already solved via ?gbtrf/?gbtrs. */
lapack_int LAPACKE_sgbrfs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const float* ab, lapack_int ldab,
                                const float* afb, lapack_int ldafb,
                                const lapack_int* ipiv, const float* b,
                                lapack_int ldb, float* x, lapack_int ldx,
                                float* ferr, float* berr, float* work,
                                lapack_int* iwork );
lapack_int LAPACKE_dgbrfs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const double* ab, lapack_int ldab,
                                const double* afb, lapack_int ldafb,
                                const lapack_int* ipiv, const double* b,
                                lapack_int ldb, double* x, lapack_int ldx,
                                double* ferr, double* berr, double* work,
                                lapack_int* iwork );
lapack_int LAPACKE_cgbrfs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const lapack_complex_float* ab, lapack_int ldab,
                                const lapack_complex_float* afb,
                                lapack_int ldafb, const lapack_int* ipiv,
                                const lapack_complex_float* b, lapack_int ldb,
                                lapack_complex_float* x, lapack_int ldx,
                                float* ferr, float* berr,
                                lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgbrfs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const lapack_complex_double* ab,
                                lapack_int ldab,
                                const lapack_complex_double* afb,
                                lapack_int ldafb, const lapack_int* ipiv,
                                const lapack_complex_double* b, lapack_int ldb,
                                lapack_complex_double* x, lapack_int ldx,
                                double* ferr, double* berr,
                                lapack_complex_double* work, double* rwork );

/* ?gbrfsx_work: extended-precision refinement with componentwise and
 * normwise error bounds (err_bnds_*), tunable via params. */
lapack_int LAPACKE_sgbrfsx_work( int matrix_order, char trans, char equed,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs, const float* ab,
                                 lapack_int ldab, const float* afb,
                                 lapack_int ldafb, const lapack_int* ipiv,
                                 const float* r, const float* c, const float* b,
                                 lapack_int ldb, float* x, lapack_int ldx,
                                 float* rcond, float* berr,
                                 lapack_int n_err_bnds, float* err_bnds_norm,
                                 float* err_bnds_comp, lapack_int nparams,
                                 float* params, float* work,
                                 lapack_int* iwork );
lapack_int LAPACKE_dgbrfsx_work( int matrix_order, char trans, char equed,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs, const double* ab,
                                 lapack_int ldab, const double* afb,
                                 lapack_int ldafb, const lapack_int* ipiv,
                                 const double* r, const double* c,
                                 const double* b, lapack_int ldb, double* x,
                                 lapack_int ldx, double* rcond, double* berr,
                                 lapack_int n_err_bnds, double* err_bnds_norm,
                                 double* err_bnds_comp, lapack_int nparams,
                                 double* params, double* work,
                                 lapack_int* iwork );
lapack_int LAPACKE_cgbrfsx_work( int matrix_order, char trans, char equed,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs,
                                 const lapack_complex_float* ab,
                                 lapack_int ldab,
                                 const lapack_complex_float* afb,
                                 lapack_int ldafb, const lapack_int* ipiv,
                                 const float* r, const float* c,
                                 const lapack_complex_float* b, lapack_int ldb,
                                 lapack_complex_float* x, lapack_int ldx,
                                 float* rcond, float* berr,
                                 lapack_int n_err_bnds, float* err_bnds_norm,
                                 float* err_bnds_comp, lapack_int nparams,
                                 float* params, lapack_complex_float* work,
                                 float* rwork );
lapack_int LAPACKE_zgbrfsx_work( int matrix_order, char trans, char equed,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs,
                                 const lapack_complex_double* ab,
                                 lapack_int ldab,
                                 const lapack_complex_double* afb,
                                 lapack_int ldafb, const lapack_int* ipiv,
                                 const double* r, const double* c,
                                 const lapack_complex_double* b, lapack_int ldb,
                                 lapack_complex_double* x, lapack_int ldx,
                                 double* rcond, double* berr,
                                 lapack_int n_err_bnds, double* err_bnds_norm,
                                 double* err_bnds_comp, lapack_int nparams,
                                 double* params, lapack_complex_double* work,
                                 double* rwork );

/* ?gbsv_work: solve A*X = B for a general band matrix (LU with partial
 * pivoting); ab and b are overwritten. */
lapack_int LAPACKE_sgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
                               lapack_int ku, lapack_int nrhs, float* ab,
                               lapack_int ldab, lapack_int* ipiv, float* b,
                               lapack_int ldb );
lapack_int LAPACKE_dgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
                               lapack_int ku, lapack_int nrhs, double* ab,
                               lapack_int ldab, lapack_int* ipiv, double* b,
                               lapack_int ldb );
lapack_int LAPACKE_cgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
                               lapack_int ku, lapack_int nrhs,
                               lapack_complex_float* ab, lapack_int ldab,
                               lapack_int* ipiv, lapack_complex_float* b,
                               lapack_int ldb );
lapack_int LAPACKE_zgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
                               lapack_int ku, lapack_int nrhs,
                               lapack_complex_double* ab, lapack_int ldab,
                               lapack_int* ipiv, lapack_complex_double* b,
                               lapack_int ldb );

/* ?gbsvx_work: expert band solver — optional equilibration (equed/r/c),
 * condition estimate (rcond) and error bounds (ferr/berr). */
lapack_int LAPACKE_sgbsvx_work( int matrix_order, char fact, char trans,
                                lapack_int n, lapack_int kl, lapack_int ku,
                                lapack_int nrhs, float* ab, lapack_int ldab,
                                float* afb, lapack_int ldafb, lapack_int* ipiv,
                                char* equed, float* r, float* c, float* b,
                                lapack_int ldb, float* x, lapack_int ldx,
                                float* rcond, float* ferr, float* berr,
                                float* work, lapack_int* iwork );
lapack_int LAPACKE_dgbsvx_work( int matrix_order, char fact, char trans,
                                lapack_int n, lapack_int kl, lapack_int ku,
                                lapack_int nrhs, double* ab, lapack_int ldab,
                                double* afb, lapack_int ldafb, lapack_int* ipiv,
                                char* equed, double* r, double* c, double* b,
                                lapack_int ldb, double* x, lapack_int ldx,
                                double* rcond, double* ferr, double* berr,
                                double* work, lapack_int* iwork );
lapack_int LAPACKE_cgbsvx_work( int matrix_order, char fact, char trans,
                                lapack_int n, lapack_int kl, lapack_int ku,
                                lapack_int nrhs, lapack_complex_float* ab,
                                lapack_int ldab, lapack_complex_float* afb,
                                lapack_int ldafb, lapack_int* ipiv, char* equed,
                                float* r, float* c, lapack_complex_float* b,
                                lapack_int ldb, lapack_complex_float* x,
                                lapack_int ldx, float* rcond, float* ferr,
                                float* berr, lapack_complex_float* work,
                                float* rwork );
lapack_int LAPACKE_zgbsvx_work( int matrix_order, char fact, char trans,
                                lapack_int n, lapack_int kl, lapack_int ku,
                                lapack_int nrhs, lapack_complex_double* ab,
                                lapack_int ldab, lapack_complex_double* afb,
                                lapack_int ldafb, lapack_int* ipiv, char* equed,
                                double* r, double* c, lapack_complex_double* b,
                                lapack_int ldb, lapack_complex_double* x,
                                lapack_int ldx, double* rcond, double* ferr,
                                double* berr, lapack_complex_double* work,
                                double* rwork );

/* ?gbsvxx_work: extra-expert band solver — adds pivot-growth estimate
 * (rpvgrw) and extended error bounds like ?gbrfsx. */
lapack_int LAPACKE_sgbsvxx_work( int matrix_order, char fact, char trans,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs, float* ab, lapack_int ldab,
                                 float* afb, lapack_int ldafb, lapack_int* ipiv,
                                 char* equed, float* r, float* c, float* b,
                                 lapack_int ldb, float* x, lapack_int ldx,
                                 float* rcond, float* rpvgrw, float* berr,
                                 lapack_int n_err_bnds, float* err_bnds_norm,
                                 float* err_bnds_comp, lapack_int nparams,
                                 float* params, float* work,
                                 lapack_int* iwork );
lapack_int LAPACKE_dgbsvxx_work( int matrix_order, char fact, char trans,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs, double* ab, lapack_int ldab,
                                 double* afb, lapack_int ldafb,
                                 lapack_int* ipiv, char* equed, double* r,
                                 double* c, double* b, lapack_int ldb,
                                 double* x, lapack_int ldx, double* rcond,
                                 double* rpvgrw, double* berr,
                                 lapack_int n_err_bnds, double* err_bnds_norm,
                                 double* err_bnds_comp, lapack_int nparams,
                                 double* params, double* work,
                                 lapack_int* iwork );
lapack_int LAPACKE_cgbsvxx_work( int matrix_order, char fact, char trans,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs, lapack_complex_float* ab,
                                 lapack_int ldab, lapack_complex_float* afb,
                                 lapack_int ldafb, lapack_int* ipiv,
                                 char* equed, float* r, float* c,
                                 lapack_complex_float* b, lapack_int ldb,
                                 lapack_complex_float* x, lapack_int ldx,
                                 float* rcond, float* rpvgrw, float* berr,
                                 lapack_int n_err_bnds, float* err_bnds_norm,
                                 float* err_bnds_comp, lapack_int nparams,
                                 float* params, lapack_complex_float* work,
                                 float* rwork );
lapack_int LAPACKE_zgbsvxx_work( int matrix_order, char fact, char trans,
                                 lapack_int n, lapack_int kl, lapack_int ku,
                                 lapack_int nrhs, lapack_complex_double* ab,
                                 lapack_int ldab, lapack_complex_double* afb,
                                 lapack_int ldafb, lapack_int* ipiv,
                                 char* equed, double* r, double* c,
                                 lapack_complex_double* b, lapack_int ldb,
                                 lapack_complex_double* x, lapack_int ldx,
                                 double* rcond, double* rpvgrw, double* berr,
                                 lapack_int n_err_bnds, double* err_bnds_norm,
                                 double* err_bnds_comp, lapack_int nparams,
                                 double* params, lapack_complex_double* work,
                                 double* rwork );

/* ?gbtrf_work: LU factorization of a general band matrix with partial
 * pivoting (pivot indices in ipiv). */
lapack_int LAPACKE_sgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku, float* ab,
                                lapack_int ldab, lapack_int* ipiv );
lapack_int LAPACKE_dgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku, double* ab,
                                lapack_int ldab, lapack_int* ipiv );
lapack_int LAPACKE_cgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku,
                                lapack_complex_float* ab, lapack_int ldab,
                                lapack_int* ipiv );
lapack_int LAPACKE_zgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int kl, lapack_int ku,
                                lapack_complex_double* ab, lapack_int ldab,
                                lapack_int* ipiv );

/* ?gbtrs_work: solve a band system using the ?gbtrf factorization. */
lapack_int LAPACKE_sgbtrs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const float* ab, lapack_int ldab,
                                const lapack_int* ipiv, float* b,
                                lapack_int ldb );
lapack_int LAPACKE_dgbtrs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const double* ab, lapack_int ldab,
                                const lapack_int* ipiv, double* b,
                                lapack_int ldb );
lapack_int LAPACKE_cgbtrs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const lapack_complex_float* ab, lapack_int ldab,
                                const lapack_int* ipiv, lapack_complex_float* b,
                                lapack_int ldb );
lapack_int LAPACKE_zgbtrs_work( int matrix_order, char trans, lapack_int n,
                                lapack_int kl, lapack_int ku, lapack_int nrhs,
                                const lapack_complex_double* ab,
                                lapack_int ldab, const lapack_int* ipiv,
                                lapack_complex_double* b, lapack_int ldb );

/* ?gebak_work: back-transform eigenvectors (v) to undo the ?gebal
 * balancing. */
lapack_int LAPACKE_sgebak_work( int matrix_order, char job, char side,
                                lapack_int n, lapack_int ilo, lapack_int ihi,
                                const float* scale, lapack_int m, float* v,
                                lapack_int ldv );
lapack_int LAPACKE_dgebak_work( int matrix_order, char job, char side,
                                lapack_int n, lapack_int ilo, lapack_int ihi,
                                const double* scale, lapack_int m, double* v,
                                lapack_int ldv );
lapack_int LAPACKE_cgebak_work( int matrix_order, char job, char side,
                                lapack_int n, lapack_int ilo, lapack_int ihi,
                                const float* scale, lapack_int m,
                                lapack_complex_float* v, lapack_int ldv );
lapack_int LAPACKE_zgebak_work( int matrix_order, char job, char side,
                                lapack_int n, lapack_int ilo, lapack_int ihi,
                                const double* scale, lapack_int m,
                                lapack_complex_double* v, lapack_int ldv );

/* ?gebal_work: balance a general matrix prior to eigenvalue computation;
 * outputs ilo/ihi and the scaling factors. */
lapack_int LAPACKE_sgebal_work( int matrix_order, char job, lapack_int n,
                                float* a, lapack_int lda, lapack_int* ilo,
                                lapack_int* ihi, float* scale );
lapack_int LAPACKE_dgebal_work( int matrix_order, char job, lapack_int n,
                                double* a, lapack_int lda, lapack_int* ilo,
                                lapack_int* ihi, double* scale );
lapack_int LAPACKE_cgebal_work( int matrix_order, char job, lapack_int n,
                                lapack_complex_float* a, lapack_int lda,
                                lapack_int* ilo, lapack_int* ihi,
                                float* scale );
lapack_int LAPACKE_zgebal_work( int matrix_order, char job, lapack_int n,
                                lapack_complex_double* a, lapack_int lda,
                                lapack_int* ilo, lapack_int* ihi,
                                double* scale );

/* ?gebrd_work: reduce a general matrix to bidiagonal form; lwork = -1
 * performs a workspace-size query (LAPACK convention). */
lapack_int LAPACKE_sgebrd_work( int matrix_order, lapack_int m, lapack_int n,
                                float* a, lapack_int lda, float* d, float* e,
                                float* tauq, float* taup, float* work,
                                lapack_int lwork );
lapack_int LAPACKE_dgebrd_work( int matrix_order, lapack_int m, lapack_int n,
                                double* a, lapack_int lda, double* d, double* e,
                                double* tauq, double* taup, double* work,
                                lapack_int lwork );
lapack_int LAPACKE_cgebrd_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_complex_float* a, lapack_int lda,
                                float* d, float* e, lapack_complex_float* tauq,
                                lapack_complex_float* taup,
                                lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgebrd_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_complex_double* a, lapack_int lda,
                                double* d, double* e,
                                lapack_complex_double* tauq,
                                lapack_complex_double* taup,
                                lapack_complex_double* work, lapack_int lwork );

/* ?gecon_work: reciprocal condition number of a general matrix from its
 * LU factorization and the original norm (anorm). */
lapack_int LAPACKE_sgecon_work( int matrix_order, char norm, lapack_int n,
                                const float* a, lapack_int lda, float anorm,
                                float* rcond, float* work, lapack_int* iwork );
lapack_int LAPACKE_dgecon_work( int matrix_order, char norm, lapack_int n,
                                const double* a, lapack_int lda, double anorm,
                                double* rcond, double* work,
                                lapack_int* iwork );
lapack_int LAPACKE_cgecon_work( int matrix_order, char norm, lapack_int n,
                                const lapack_complex_float* a, lapack_int lda,
                                float anorm, float* rcond,
                                lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgecon_work( int matrix_order, char norm, lapack_int n,
                                const lapack_complex_double* a, lapack_int lda,
                                double anorm, double* rcond,
                                lapack_complex_double* work, double* rwork );

/* ?geequ_work: row/column equilibration scalings for a general dense
 * matrix. */
lapack_int LAPACKE_sgeequ_work( int matrix_order, lapack_int m, lapack_int n,
                                const float* a, lapack_int lda, float* r,
                                float* c, float* rowcnd, float* colcnd,
                                float* amax );
lapack_int LAPACKE_dgeequ_work( int matrix_order, lapack_int m, lapack_int n,
                                const double* a, lapack_int lda, double* r,
                                double* c, double* rowcnd, double* colcnd,
                                double* amax );
lapack_int LAPACKE_cgeequ_work( int matrix_order, lapack_int m, lapack_int n,
                                const lapack_complex_float* a, lapack_int lda,
                                float* r, float* c, float* rowcnd,
                                float* colcnd, float* amax );
lapack_int LAPACKE_zgeequ_work( int matrix_order, lapack_int m, lapack_int n,
                                const lapack_complex_double* a, lapack_int lda,
                                double* r, double* c, double* rowcnd,
                                double* colcnd, double* amax );

/* ?geequb_work: like ?geequ but scalings restricted to powers of the radix. */
lapack_int LAPACKE_sgeequb_work( int matrix_order, lapack_int m, lapack_int n,
                                 const float* a, lapack_int lda, float* r,
                                 float* c, float* rowcnd, float* colcnd,
                                 float* amax );
lapack_int LAPACKE_dgeequb_work( int matrix_order, lapack_int m, lapack_int n,
                                 const double* a, lapack_int lda, double* r,
                                 double* c, double* rowcnd, double* colcnd,
                                 double* amax );
lapack_int LAPACKE_cgeequb_work( int matrix_order, lapack_int m, lapack_int n,
                    
             const lapack_complex_float* a, lapack_int lda,\n                                 float* r, float* c, float* rowcnd,\n                                 float* colcnd, float* amax );\nlapack_int LAPACKE_zgeequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* r, double* c, double* rowcnd,\n                                 double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_S_SELECT2 select, lapack_int n, float* a,\n                               lapack_int lda, lapack_int* sdim, float* wr,\n                               float* wi, float* vs, lapack_int ldvs,\n                               float* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_dgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_D_SELECT2 select, lapack_int n, double* a,\n                               lapack_int lda, lapack_int* sdim, double* wr,\n                               double* wi, double* vs, lapack_int ldvs,\n                               double* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_cgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_C_SELECT1 select, lapack_int n,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_int* sdim, lapack_complex_float* w,\n                               lapack_complex_float* vs, lapack_int ldvs,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork, lapack_logical* bwork );\nlapack_int LAPACKE_zgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_Z_SELECT1 select, 
lapack_int n,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_int* sdim, lapack_complex_double* w,\n                               lapack_complex_double* vs, lapack_int ldvs,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_S_SELECT2 select, char sense,\n                                lapack_int n, float* a, lapack_int lda,\n                                lapack_int* sdim, float* wr, float* wi,\n                                float* vs, lapack_int ldvs, float* rconde,\n                                float* rcondv, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_dgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_D_SELECT2 select, char sense,\n                                lapack_int n, double* a, lapack_int lda,\n                                lapack_int* sdim, double* wr, double* wi,\n                                double* vs, lapack_int ldvs, double* rconde,\n                                double* rcondv, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_cgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_C_SELECT1 select, char sense,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_int* sdim,\n                                lapack_complex_float* w,\n                                lapack_complex_float* vs, lapack_int ldvs,\n                                
float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_logical* bwork );\nlapack_int LAPACKE_zgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_Z_SELECT1 select, char sense,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_int* sdim,\n                                lapack_complex_double* w,\n                                lapack_complex_double* vs, lapack_int ldvs,\n                                double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, float* a, lapack_int lda,\n                               float* wr, float* wi, float* vl, lapack_int ldvl,\n                               float* vr, lapack_int ldvr, float* work,\n                               lapack_int lwork );\nlapack_int LAPACKE_dgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* wr, double* wi, double* vl,\n                               lapack_int ldvl, double* vr, lapack_int ldvr,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* w,\n                               lapack_complex_float* vl, lapack_int ldvl,\n                               lapack_complex_float* vr, lapack_int ldvr,\n                               lapack_complex_float* work, lapack_int lwork,\n                    
           float* rwork );\nlapack_int LAPACKE_zgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* w,\n                               lapack_complex_double* vl, lapack_int ldvl,\n                               lapack_complex_double* vr, lapack_int ldvr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork );\n\nlapack_int LAPACKE_sgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n, float* a,\n                                lapack_int lda, float* wr, float* wi, float* vl,\n                                lapack_int ldvl, float* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, float* scale,\n                                float* abnrm, float* rconde, float* rcondv,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n, double* a,\n                                lapack_int lda, double* wr, double* wi,\n                                double* vl, lapack_int ldvl, double* vr,\n                                lapack_int ldvr, lapack_int* ilo,\n                                lapack_int* ihi, double* scale, double* abnrm,\n                                double* rconde, double* rcondv, double* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_cgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                
lapack_complex_float* w,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, float* scale,\n                                float* abnrm, float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork );\nlapack_int LAPACKE_zgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* w,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, double* scale,\n                                double* abnrm, double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_sgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, float* a, lapack_int lda,\n                                float* tau, float* work, lapack_int lwork );\nlapack_int LAPACKE_dgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, double* a, lapack_int lda,\n                                double* tau, double* work, lapack_int lwork );\nlapack_int LAPACKE_cgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int 
LAPACKE_zgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgejsv_work( int matrix_order, char joba, char jobu,\n                                char jobv, char jobr, char jobt, char jobp,\n                                lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* sva, float* u,\n                                lapack_int ldu, float* v, lapack_int ldv,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgejsv_work( int matrix_order, char joba, char jobu,\n                                char jobv, char jobr, char jobt, char jobp,\n                                lapack_int m, lapack_int n, double* a,\n                                lapack_int lda, double* sva, double* u,\n                                lapack_int ldu, double* v, lapack_int ldv,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work );\nlapack_int LAPACKE_dgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work );\nlapack_int LAPACKE_cgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zgelq2_work( int 
matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_sgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs, float* a,\n                               lapack_int lda, float* b, lapack_int ldb,\n                               float* work, lapack_int lwork );\nlapack_int LAPACKE_dgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs, double* a,\n                               lapack_int lda, double* b, lapack_int ldb,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_cgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs,\n         
                      lapack_complex_float* a, lapack_int lda,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* s,\n                                double rcond, lapack_int* rank, double* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_cgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_zgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n         
                       lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, double* s, double rcond,\n                                lapack_int* rank, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* s,\n                                double rcond, lapack_int* rank, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_cgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, double* s, double rcond,\n                                lapack_int* rank, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_sgelsy_work( int 
matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, lapack_int* jpvt,\n                                float rcond, lapack_int* rank, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgelsy_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, lapack_int* jpvt,\n                                double rcond, lapack_int* rank, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_cgelsy_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_int* jpvt, float rcond,\n                                lapack_int* rank, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zgelsy_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_int* jpvt, double rcond,\n                                lapack_int* rank, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_sgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                         
       double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* jpvt,\n                                float* tau, float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* jpvt,\n                                double* tau, double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork );\nlapack_int LAPACKE_zgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_sgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* jpvt,\n              
                  float* tau, float* work );\nlapack_int LAPACKE_dgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* jpvt,\n                                double* tau, double* work );\nlapack_int LAPACKE_cgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_float* tau,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_double* tau,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work );\nlapack_int LAPACKE_dgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work );\nlapack_int LAPACKE_cgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_sgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n       
                         float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* tau,\n                                 float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 double* a, lapack_int lda, double* tau,\n                                 double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* tau,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* tau,\n                                 lapack_complex_double* work,\n                                 lapack_int lwork );\n\nlapack_int LAPACKE_sgerfs_work( int matrix_order, 
char trans, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const float* af, lapack_int ldaf,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                
lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs, const float* a,\n                                 lapack_int lda, const float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* r, const float* c, const float* b,\n                                 lapack_int ldb, float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs, const double* a,\n                                 lapack_int lda, const double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* r, const double* c,\n                                 const double* b, lapack_int ldb, double* x,\n                                 lapack_int ldx, double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n 
                                const lapack_complex_float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* r, const float* c,\n                                 const lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* r, const double* c,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_sgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int 
lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* s, float* u, lapack_int ldu, float* vt,\n                                lapack_int ldvt, float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* s, double* u, lapack_int ldu,\n                                double* vt, lapack_int ldvt, double* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_cgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, float* s,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* vt, lapack_int ldvt,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork );\nlapack_int LAPACKE_zgesdd_work( int matrix_order, char jobz, lapack_int m,\n        
                        lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, double* s,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* vt, lapack_int ldvt,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork );\n\nlapack_int LAPACKE_sgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* a, lapack_int lda, lapack_int* ipiv,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* a, lapack_int lda, lapack_int* ipiv,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_cgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dsgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                double* a, lapack_int lda, lapack_int* ipiv,\n                                double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* work, float* swork,\n                                lapack_int* iter );\nlapack_int LAPACKE_zcgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv, 
lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, lapack_complex_double* work,\n                                lapack_complex_float* swork, double* rwork,\n                                lapack_int* iter );\n\nlapack_int LAPACKE_sgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* s, float* u,\n                                lapack_int ldu, float* vt, lapack_int ldvt,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n, double* a,\n                                lapack_int lda, double* s, double* u,\n                                lapack_int ldu, double* vt, lapack_int ldvt,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float* s, lapack_complex_float* u,\n                                lapack_int ldu, lapack_complex_float* vt,\n                                lapack_int ldvt, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double* s, lapack_complex_double* u,\n                                lapack_int ldu, lapack_complex_double* vt,\n                                lapack_int ldvt, lapack_complex_double* work,\n                                
lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_sgesvj_work( int matrix_order, char joba, char jobu,\n                                char jobv, lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* sva, lapack_int mv,\n                                float* v, lapack_int ldv, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgesvj_work( int matrix_order, char joba, char jobu,\n                                char jobv, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* sva,\n                                lapack_int mv, double* v, lapack_int ldv,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, float* a,\n                                lapack_int lda, float* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, float* r,\n                                float* c, float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, double* a,\n                                lapack_int lda, double* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, double* r,\n                                double* c, double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, double* work, lapack_int* iwork );\nlapack_int LAPACKE_cgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                            
    lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, float* r,\n                                float* c, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, double* r,\n                                double* c, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_sgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs, float* a,\n                                 lapack_int lda, float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* r,\n                                 float* c, float* b, lapack_int ldb, float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params, float* work,\n                                 lapack_int* iwork 
);\nlapack_int LAPACKE_dgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs, double* a,\n                                 lapack_int lda, double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* r,\n                                 double* c, double* b, lapack_int ldb,\n                                 double* x, lapack_int ldx, double* rcond,\n                                 double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* r,\n                                 float* c, lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params,\n                                 lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                       
          lapack_int* ipiv, char* equed, double* r,\n                                 double* c, lapack_complex_double* b,\n                                 lapack_int ldb, lapack_complex_double* x,\n                                 lapack_int ldx, double* rcond, double* rpvgrw,\n                                 double* berr, lapack_int n_err_bnds,\n                                 double* err_bnds_norm, double* err_bnds_comp,\n                                 lapack_int nparams, double* params,\n                                 lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv );\nlapack_int LAPACKE_zgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv );\nlapack_int LAPACKE_zgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n       
                         lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetri_work( int matrix_order, lapack_int n, float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgetri_work( int matrix_order, lapack_int n, double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgetri_work( int matrix_order, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgetri_work( int matrix_order, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_cgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgetrs_work( int matrix_order, char trans, lapack_int n,\n                        
        lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const float* lscale, const float* rscale,\n                                lapack_int m, float* v, lapack_int ldv );\nlapack_int LAPACKE_dggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const double* lscale, const double* rscale,\n                                lapack_int m, double* v, lapack_int ldv );\nlapack_int LAPACKE_cggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const float* lscale, const float* rscale,\n                                lapack_int m, lapack_complex_float* v,\n                                lapack_int ldv );\nlapack_int LAPACKE_zggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const double* lscale, const double* rscale,\n                                lapack_int m, lapack_complex_double* v,\n                                lapack_int ldv );\n\nlapack_int LAPACKE_sggbal_work( int matrix_order, char job, lapack_int n,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, lapack_int* ilo,\n                                lapack_int* ihi, float* lscale, float* rscale,\n                                float* work );\nlapack_int LAPACKE_dggbal_work( int matrix_order, char job, lapack_int n,\n                                double* a, lapack_int lda, double* 
b,\n                                lapack_int ldb, lapack_int* ilo,\n                                lapack_int* ihi, double* lscale, double* rscale,\n                                double* work );\nlapack_int LAPACKE_cggbal_work( int matrix_order, char job, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_int* ilo, lapack_int* ihi, float* lscale,\n                                float* rscale, float* work );\nlapack_int LAPACKE_zggbal_work( int matrix_order, char job, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_int* ilo, lapack_int* ihi,\n                                double* lscale, double* rscale, double* work );\n\nlapack_int LAPACKE_sgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_S_SELECT3 selctg, lapack_int n,\n                               float* a, lapack_int lda, float* b,\n                               lapack_int ldb, lapack_int* sdim, float* alphar,\n                               float* alphai, float* beta, float* vsl,\n                               lapack_int ldvsl, float* vsr, lapack_int ldvsr,\n                               float* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_dgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_D_SELECT3 selctg, lapack_int n,\n                               double* a, lapack_int lda, double* b,\n                               lapack_int ldb, lapack_int* sdim, double* alphar,\n                               double* alphai, double* beta, double* vsl,\n                               lapack_int ldvsl, double* vsr, lapack_int ldvsr,\n           
                    double* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_cgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_C_SELECT2 selctg, lapack_int n,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_int* sdim, lapack_complex_float* alpha,\n                               lapack_complex_float* beta,\n                               lapack_complex_float* vsl, lapack_int ldvsl,\n                               lapack_complex_float* vsr, lapack_int ldvsr,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork, lapack_logical* bwork );\nlapack_int LAPACKE_zgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_Z_SELECT2 selctg, lapack_int n,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_int* sdim, lapack_complex_double* alpha,\n                               lapack_complex_double* beta,\n                               lapack_complex_double* vsl, lapack_int ldvsl,\n                               lapack_complex_double* vsr, lapack_int ldvsr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_S_SELECT3 selctg, char sense,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, lapack_int* sdim,\n                                float* alphar, float* alphai, float* beta,\n    
                            float* vsl, lapack_int ldvsl, float* vsr,\n                                lapack_int ldvsr, float* rconde, float* rcondv,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_dggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_D_SELECT3 selctg, char sense,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, lapack_int* sdim,\n                                double* alphar, double* alphai, double* beta,\n                                double* vsl, lapack_int ldvsl, double* vsr,\n                                lapack_int ldvsr, double* rconde,\n                                double* rcondv, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_cggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_C_SELECT2 selctg, char sense,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_int* sdim,\n                                lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* vsl, lapack_int ldvsl,\n                                lapack_complex_float* vsr, lapack_int ldvsr,\n                                float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork,\n                                lapack_int liwork, 
lapack_logical* bwork );\nlapack_int LAPACKE_zggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_Z_SELECT2 selctg, char sense,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_int* sdim,\n                                lapack_complex_double* alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* vsl, lapack_int ldvsl,\n                                lapack_complex_double* vsr, lapack_int ldvsr,\n                                double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork,\n                                lapack_int liwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, float* a, lapack_int lda, float* b,\n                               lapack_int ldb, float* alphar, float* alphai,\n                               float* beta, float* vl, lapack_int ldvl,\n                               float* vr, lapack_int ldvr, float* work,\n                               lapack_int lwork );\nlapack_int LAPACKE_dggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* b, lapack_int ldb, double* alphar,\n                               double* alphai, double* beta, double* vl,\n                               lapack_int ldvl, double* vr, lapack_int ldvr,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_cggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_float* a,\n          
                     lapack_int lda, lapack_complex_float* b,\n                               lapack_int ldb, lapack_complex_float* alpha,\n                               lapack_complex_float* beta,\n                               lapack_complex_float* vl, lapack_int ldvl,\n                               lapack_complex_float* vr, lapack_int ldvr,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork );\nlapack_int LAPACKE_zggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* b,\n                               lapack_int ldb, lapack_complex_double* alpha,\n                               lapack_complex_double* beta,\n                               lapack_complex_double* vl, lapack_int ldvl,\n                               lapack_complex_double* vr, lapack_int ldvr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork );\n\nlapack_int LAPACKE_sggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float* alphar, float* alphai, float* beta,\n                                float* vl, lapack_int ldvl, float* vr,\n                                lapack_int ldvr, lapack_int* ilo,\n                                lapack_int* ihi, float* lscale, float* rscale,\n                                float* abnrm, float* bbnrm, float* rconde,\n                                float* rcondv, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_logical* bwork );\nlapack_int LAPACKE_dggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, 
char sense, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* alphar, double* alphai, double* beta,\n                                double* vl, lapack_int ldvl, double* vr,\n                                lapack_int ldvr, lapack_int* ilo,\n                                lapack_int* ihi, double* lscale, double* rscale,\n                                double* abnrm, double* bbnrm, double* rconde,\n                                double* rcondv, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_logical* bwork );\nlapack_int LAPACKE_cggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, float* lscale,\n                                float* rscale, float* abnrm, float* bbnrm,\n                                float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_zggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* 
alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi,\n                                double* lscale, double* rscale, double* abnrm,\n                                double* bbnrm, double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork,\n                                lapack_logical* bwork );\n\nlapack_int LAPACKE_sggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* d, float* x,\n                                float* y, float* work, lapack_int lwork );\nlapack_int LAPACKE_dggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* d, double* x,\n                                double* y, double* work, lapack_int lwork );\nlapack_int LAPACKE_cggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* d,\n                                lapack_complex_float* x,\n                                lapack_complex_float* y,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* 
b,\n                                lapack_int ldb, lapack_complex_double* d,\n                                lapack_complex_double* x,\n                                lapack_complex_double* y,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float* q, lapack_int ldq,\n                                float* z, lapack_int ldz );\nlapack_int LAPACKE_dgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double* q, lapack_int ldq,\n                                double* z, lapack_int ldz );\nlapack_int LAPACKE_cgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sgglse_work( int matrix_order, lapack_int m, lapack_int n,\n        
                        lapack_int p, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* c, float* d,\n                                float* x, float* work, lapack_int lwork );\nlapack_int LAPACKE_dgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* c, double* d,\n                                double* x, double* work, lapack_int lwork );\nlapack_int LAPACKE_cgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* c,\n                                lapack_complex_float* d,\n                                lapack_complex_float* x,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* c,\n                                lapack_complex_double* d,\n                                lapack_complex_double* x,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, float* a, lapack_int lda,\n                                float* taua, float* b, lapack_int ldb,\n                                float* taub, float* work, lapack_int lwork );\nlapack_int LAPACKE_dggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, double* a, lapack_int lda,\n           
                     double* taua, double* b, lapack_int ldb,\n                                double* taub, double* work, lapack_int lwork );\nlapack_int LAPACKE_cggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* taua,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* taub,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* taua,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* taub,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* taua, float* b, lapack_int ldb,\n                                float* taub, float* work, lapack_int lwork );\nlapack_int LAPACKE_dggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* taua, double* b, lapack_int ldb,\n                                double* taub, double* work, lapack_int lwork );\nlapack_int LAPACKE_cggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* taua,\n                                lapack_complex_float* b, lapack_int ldb,\n                                
lapack_complex_float* taub,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* taua,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* taub,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float* alpha, float* beta,\n                                float* u, lapack_int ldu, float* v,\n                                lapack_int ldv, float* q, lapack_int ldq,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double* alpha, double* beta,\n                                double* u, lapack_int ldu, double* v,\n                                lapack_int ldv, double* q, lapack_int ldq,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                
lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float* alpha, float* beta,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* v, lapack_int ldv,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_zggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double* alpha, double* beta,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* v, lapack_int ldv,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float tola,\n                                float tolb, lapack_int* k, lapack_int* l,\n                                float* u, lapack_int ldu, float* v,\n                                lapack_int ldv, float* q, lapack_int ldq,\n                                lapack_int* iwork, float* tau, float* work );\nlapack_int LAPACKE_dggsvp_work( int matrix_order, char jobu, char jobv,\n                     
           char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double tola,\n                                double tolb, lapack_int* k, lapack_int* l,\n                                double* u, lapack_int ldu, double* v,\n                                lapack_int ldv, double* q, lapack_int ldq,\n                                lapack_int* iwork, double* tau, double* work );\nlapack_int LAPACKE_cggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, float tola, float tolb,\n                                lapack_int* k, lapack_int* l,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* v, lapack_int ldv,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_int* iwork, float* rwork,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, double tola, double tolb,\n                                lapack_int* k, lapack_int* l,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* v, lapack_int ldv,\n                                lapack_complex_double* q, lapack_int ldq,\n    
                            lapack_int* iwork, double* rwork,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_sgtcon_work( char norm, lapack_int n, const float* dl,\n                                const float* d, const float* du,\n                                const float* du2, const lapack_int* ipiv,\n                                float anorm, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgtcon_work( char norm, lapack_int n, const double* dl,\n                                const double* d, const double* du,\n                                const double* du2, const lapack_int* ipiv,\n                                double anorm, double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgtcon_work( char norm, lapack_int n,\n                                const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                const lapack_complex_float* du2,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zgtcon_work( char norm, lapack_int n,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                const lapack_complex_double* du2,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_sgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* dl,\n                                
const float* d, const float* du,\n                                const float* dlf, const float* df,\n                                const float* duf, const float* du2,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* dl,\n                                const double* d, const double* du,\n                                const double* dlf, const double* df,\n                                const double* duf, const double* du2,\n                                const lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                const lapack_complex_float* dlf,\n                                const lapack_complex_float* df,\n                                const lapack_complex_float* duf,\n                                const lapack_complex_float* du2,\n                                const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int 
LAPACKE_zgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                const lapack_complex_double* dlf,\n                                const lapack_complex_double* df,\n                                const lapack_complex_double* duf,\n                                const lapack_complex_double* du2,\n                                const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* dl, float* d, float* du, float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* dl, double* d, double* du, double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_cgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_float* dl,\n                               lapack_complex_float* d,\n                               lapack_complex_float* du,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_double* dl,\n                               lapack_complex_double* d,\n                               lapack_complex_double* du,\n                               lapack_complex_double* b, 
lapack_int ldb );\n\nlapack_int LAPACKE_sgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, const float* dl,\n                                const float* d, const float* du, float* dlf,\n                                float* df, float* duf, float* du2,\n                                lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, const double* dl,\n                                const double* d, const double* du, double* dlf,\n                                double* df, double* duf, double* du2,\n                                lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                lapack_complex_float* dlf,\n                                lapack_complex_float* df,\n                                lapack_complex_float* duf,\n                                lapack_complex_float* du2, lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* 
ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                lapack_complex_double* dlf,\n                                lapack_complex_double* df,\n                                lapack_complex_double* duf,\n                                lapack_complex_double* du2, lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgttrf_work( lapack_int n, float* dl, float* d, float* du,\n                                float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_dgttrf_work( lapack_int n, double* dl, double* d, double* du,\n                                double* du2, lapack_int* ipiv );\nlapack_int LAPACKE_cgttrf_work( lapack_int n, lapack_complex_float* dl,\n                                lapack_complex_float* d,\n                                lapack_complex_float* du,\n                                lapack_complex_float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_zgttrf_work( lapack_int n, lapack_complex_double* dl,\n                                lapack_complex_double* d,\n                                lapack_complex_double* du,\n                                lapack_complex_double* du2, lapack_int* ipiv );\n\nlapack_int LAPACKE_sgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* dl,\n            
                    const float* d, const float* du,\n                                const float* du2, const lapack_int* ipiv,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* dl,\n                                const double* d, const double* du,\n                                const double* du2, const lapack_int* ipiv,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_cgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                const lapack_complex_float* du2,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                const lapack_complex_double* du2,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chbev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int kd,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               float* w, lapack_complex_float* z,\n                               lapack_int ldz, lapack_complex_float* work,\n                               float* rwork );\nlapack_int LAPACKE_zhbev_work( int 
matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int kd,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               double* w, lapack_complex_double* z,\n                               lapack_int ldz, lapack_complex_double* work,\n                               double* rwork );\n\nlapack_int LAPACKE_chbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* q, lapack_int ldq,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                    
            lapack_int ldz, lapack_complex_float* work,\n                                float* rwork, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_zhbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_complex_double* q, lapack_int ldq,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                double* rwork, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_chbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_complex_float* bb, lapack_int ldbb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                const lapack_complex_double* bb,\n                                lapack_int ldbb, lapack_complex_double* x,\n                                lapack_int ldx, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_chbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int 
kb,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               lapack_complex_float* bb, lapack_int ldbb,\n                               float* w, lapack_complex_float* z,\n                               lapack_int ldz, lapack_complex_float* work,\n                               float* rwork );\nlapack_int LAPACKE_zhbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               lapack_complex_double* bb, lapack_int ldbb,\n                               double* w, lapack_complex_double* z,\n                               lapack_int ldz, lapack_complex_double* work,\n                               double* rwork );\n\nlapack_int LAPACKE_chbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* bb, lapack_int ldbb,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_complex_double* bb, lapack_int ldbb,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n  
                              lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, lapack_complex_float* ab,\n                                lapack_int ldab, lapack_complex_float* bb,\n                                lapack_int ldbb, lapack_complex_float* q,\n                                lapack_int ldq, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, lapack_complex_double* ab,\n                                lapack_int ldab, lapack_complex_double* bb,\n                                lapack_int ldbb, lapack_complex_double* q,\n                                lapack_int ldq, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                float* d, float* e, 
lapack_complex_float* q,\n                                lapack_int ldq, lapack_complex_float* work );\nlapack_int LAPACKE_zhbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                double* d, double* e, lapack_complex_double* q,\n                                lapack_int ldq, lapack_complex_double* work );\n\nlapack_int LAPACKE_checon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zhecon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_cheequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 float* s, float* scond, float* amax,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_zheequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* s, double* scond, double* amax,\n                                 lapack_complex_double* work );\n\nlapack_int LAPACKE_cheev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, float* w,\n                               lapack_complex_float* work, lapack_int lwork,\n                               
float* rwork );\nlapack_int LAPACKE_zheev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, double* w,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork );\n\nlapack_int LAPACKE_cheevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, float* w,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_zheevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, double* w,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_cheevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_int* isuppz,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_zheevr_work( int matrix_order, char jobz, char range,\n                    
            char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_int* isuppz,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_cheevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zheevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chegst_work( int matrix_order, lapack_int itype, char uplo,\n                    
            lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zhegst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_chegv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* b,\n                               lapack_int ldb, float* w,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork );\nlapack_int LAPACKE_zhegv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_complex_double* b, lapack_int ldb,\n                               double* w, lapack_complex_double* work,\n                               lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_chegvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float* w, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhegvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n  
                              lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double* w, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chegvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhegvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_cherfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int 
nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zherfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_cherfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 const lapack_complex_float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* s, const lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int 
LAPACKE_zherfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* s,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_chesv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zhesv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_chesvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                lapack_int* ipiv, 
const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zhesvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_chesvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* s,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zhesvxx_work( int matrix_order, char fact, char uplo,\n                   
              lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* s,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_chetrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float* d, float* e, lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zhetrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double* d, double* e,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_chetrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_zhetrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_double* work,\n                             
   lapack_int lwork );\n\nlapack_int LAPACKE_chetri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zhetri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_chetrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zhetrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chfrk_work( int matrix_order, char transr, char uplo,\n                               char trans, lapack_int n, lapack_int k,\n                               float alpha, const lapack_complex_float* a,\n                               lapack_int lda, float beta,\n                               lapack_complex_float* c );\nlapack_int LAPACKE_zhfrk_work( int matrix_order, char transr, char uplo,\n                               char trans, lapack_int n, lapack_int k,\n                               double alpha, const lapack_complex_double* a,\n                               lapack_int lda, double beta,\n                               lapack_complex_double* c );\n\nlapack_int LAPACKE_shgeqz_work( int matrix_order, char job, char compq,\n                                char compz, 
lapack_int n, lapack_int ilo,\n                                lapack_int ihi, float* h, lapack_int ldh,\n                                float* t, lapack_int ldt, float* alphar,\n                                float* alphai, float* beta, float* q,\n                                lapack_int ldq, float* z, lapack_int ldz,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dhgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, double* h, lapack_int ldh,\n                                double* t, lapack_int ldt, double* alphar,\n                                double* alphai, double* beta, double* q,\n                                lapack_int ldq, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_chgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_float* h,\n                                lapack_int ldh, lapack_complex_float* t,\n                                lapack_int ldt, lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork );\nlapack_int LAPACKE_zhgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_double* h,\n                                lapack_int ldh, lapack_complex_double* t,\n                                lapack_int ldt, lapack_complex_double* alpha,\n   
                             lapack_complex_double* beta,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_chpcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zhpcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_chpev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_float* ap, float* w,\n                               lapack_complex_float* z, lapack_int ldz,\n                               lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhpev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_double* ap,\n                               double* w, lapack_complex_double* z,\n                               lapack_int ldz, lapack_complex_double* work,\n                               double* rwork );\n\nlapack_int LAPACKE_chpevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_float* ap,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n     
                           lapack_int liwork );\nlapack_int LAPACKE_zhpevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_double* ap,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chpevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* ap, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhpevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* ap, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chpgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, lapack_complex_float* ap,\n                                const lapack_complex_float* bp );\nlapack_int LAPACKE_zhpgst_work( int matrix_order, lapack_int itype, char uplo,\n                 
               lapack_int n, lapack_complex_double* ap,\n                                const lapack_complex_double* bp );\n\nlapack_int LAPACKE_chpgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n,\n                               lapack_complex_float* ap,\n                               lapack_complex_float* bp, float* w,\n                               lapack_complex_float* z, lapack_int ldz,\n                               lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhpgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n,\n                               lapack_complex_double* ap,\n                               lapack_complex_double* bp, double* w,\n                               lapack_complex_double* z, lapack_int ldz,\n                               lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_chpgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                                lapack_complex_float* bp, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_zhpgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                lapack_complex_double* bp, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int 
lrwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_chpgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                                lapack_complex_float* bp, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhpgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                lapack_complex_double* bp, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_complex_float* afp,\n                                const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhprfs_work( int 
matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* afp,\n                                const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_chpsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* ap,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zhpsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* ap,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\n\nlapack_int LAPACKE_chpsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* afp, lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhpsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* afp, lapack_int* ipiv,\n  
                              const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_chptrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap, float* d, float* e,\n                                lapack_complex_float* tau );\nlapack_int LAPACKE_zhptrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap, double* d, double* e,\n                                lapack_complex_double* tau );\n\nlapack_int LAPACKE_chptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zhptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_chptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zhptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_chptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zhptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n     
                           const lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_shsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, lapack_logical* select,\n                                lapack_int n, const float* h, lapack_int ldh,\n                                float* wr, const float* wi, float* vl,\n                                lapack_int ldvl, float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, float* work,\n                                lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_dhsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, lapack_logical* select,\n                                lapack_int n, const double* h, lapack_int ldh,\n                                double* wr, const double* wi, double* vl,\n                                lapack_int ldvl, double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, double* work,\n                                lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_chsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, const lapack_logical* select,\n                                lapack_int n, const lapack_complex_float* h,\n                                lapack_int ldh, lapack_complex_float* w,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_zhsein_work( int matrix_order, char job, char eigsrc,\n           
                     char initv, const lapack_logical* select,\n                                lapack_int n, const lapack_complex_double* h,\n                                lapack_int ldh, lapack_complex_double* w,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* ifaill, lapack_int* ifailr );\n\nlapack_int LAPACKE_shseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                float* h, lapack_int ldh, float* wr, float* wi,\n                                float* z, lapack_int ldz, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dhseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                double* h, lapack_int ldh, double* wr,\n                                double* wi, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_chseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_float* h, lapack_int ldh,\n                                lapack_complex_float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zhseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_double* h, lapack_int ldh,\n                        
        lapack_complex_double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_clacgv_work( lapack_int n, lapack_complex_float* x,\n                                lapack_int incx );\nlapack_int LAPACKE_zlacgv_work( lapack_int n, lapack_complex_double* x,\n                                lapack_int incx );\n\nlapack_int LAPACKE_slacn2_work( lapack_int n, float* v, float* x,\n                                lapack_int* isgn, float* est, lapack_int* kase,\n                                lapack_int* isave );\nlapack_int LAPACKE_dlacn2_work( lapack_int n, double* v, double* x,\n                                lapack_int* isgn, double* est, lapack_int* kase,\n                                lapack_int* isave );\nlapack_int LAPACKE_clacn2_work( lapack_int n, lapack_complex_float* v,\n                                lapack_complex_float* x,\n                                float* est, lapack_int* kase,\n                                lapack_int* isave );\nlapack_int LAPACKE_zlacn2_work( lapack_int n, lapack_complex_double* v,\n                                lapack_complex_double* x,\n                                double* est, lapack_int* kase,\n                                lapack_int* isave );\n\nlapack_int LAPACKE_slacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const float* a, lapack_int lda,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dlacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_clacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, 
lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zlacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_clacp2_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zlacp2_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_zlag2c_work( int matrix_order, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_float* sa, lapack_int ldsa );\n\nlapack_int LAPACKE_slag2d_work( int matrix_order, lapack_int m, lapack_int n,\n                                const float* sa, lapack_int ldsa, double* a,\n                                lapack_int lda );\n\nlapack_int LAPACKE_dlag2s_work( int matrix_order, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda, float* sa,\n                                lapack_int ldsa );\n\nlapack_int LAPACKE_clag2z_work( int matrix_order, lapack_int m, lapack_int n,\n                                const lapack_complex_float* sa, lapack_int ldsa,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const float* d,\n                                float* a, lapack_int lda, lapack_int* iseed,\n                                
float* work );\nlapack_int LAPACKE_dlagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const double* d,\n                                double* a, lapack_int lda, lapack_int* iseed,\n                                double* work );\nlapack_int LAPACKE_clagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const float* d,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* iseed, lapack_complex_float* work );\nlapack_int LAPACKE_zlagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const double* d,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* iseed,\n                                lapack_complex_double* work );\n                                \nlapack_int LAPACKE_claghe_work( int matrix_order, lapack_int n, lapack_int k,\n                                const float* d, lapack_complex_float* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zlaghe_work( int matrix_order, lapack_int n, lapack_int k,\n                                const double* d, lapack_complex_double* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_slagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const float* d, float* a, lapack_int lda,\n                                lapack_int* iseed, float* work );\nlapack_int LAPACKE_dlagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const double* d, double* a, lapack_int lda,\n                                lapack_int* 
iseed, double* work );\nlapack_int LAPACKE_clagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const float* d, lapack_complex_float* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zlagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const double* d, lapack_complex_double* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_slapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n, float* x,\n                                lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_dlapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n, double* x,\n                                lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_clapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_float* x, lapack_int ldx,\n                                lapack_int* k );\nlapack_int LAPACKE_zlapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_double* x, lapack_int ldx,\n                                lapack_int* k );\n\nlapack_int LAPACKE_slartgp_work( float f, float g, float* cs, float* sn,\n                                 float* r );\nlapack_int LAPACKE_dlartgp_work( double f, double g, double* cs, double* sn,\n                                 double* r );\n\nlapack_int LAPACKE_slartgs_work( float x, float y, float sigma, float* cs,\n                                 float* sn );\nlapack_int LAPACKE_dlartgs_work( double x, double y, double sigma, double* cs,\n         
                        double* sn );\n                                \nfloat LAPACKE_slapy2_work( float x, float y );\ndouble LAPACKE_dlapy2_work( double x, double y );\n\nfloat LAPACKE_slapy3_work( float x, float y, float z );\ndouble LAPACKE_dlapy3_work( double x, double y, double z );\n\nfloat LAPACKE_slamch_work( char cmach );\ndouble LAPACKE_dlamch_work( char cmach );\n\nfloat LAPACKE_slange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const float* a, lapack_int lda,\n                                float* work );\ndouble LAPACKE_dlange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* work );\nfloat LAPACKE_clange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_zlange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, double* work );\n\nfloat LAPACKE_clanhe_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_zlanhe_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, double* work );\n\nfloat LAPACKE_slansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const float* a, lapack_int lda,\n                                float* work );\ndouble LAPACKE_dlansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const double* a, lapack_int lda,\n                    
            double* work );\nfloat LAPACKE_clansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_zlansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, double* work );\n\nfloat LAPACKE_slantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n, const float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_dlantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda, double* work );\nfloat LAPACKE_clantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float* work );\ndouble LAPACKE_zlantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double* work );\n\nlapack_int LAPACKE_slarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, const float* v,\n                                lapack_int ldv, const float* t, lapack_int ldt,\n                                float* c, lapack_int ldc, float* work,\n                                lapack_int ldwork );\nlapack_int LAPACKE_dlarfb_work( int matrix_order, char side, char trans,\n                                char direct, char 
storev, lapack_int m,\n                                lapack_int n, lapack_int k, const double* v,\n                                lapack_int ldv, const double* t, lapack_int ldt,\n                                double* c, lapack_int ldc, double* work,\n                                lapack_int ldwork );\nlapack_int LAPACKE_clarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_float* v, lapack_int ldv,\n                                const lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int ldwork );\nlapack_int LAPACKE_zlarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_double* v, lapack_int ldv,\n                                const lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work,\n                                lapack_int ldwork );\n\nlapack_int LAPACKE_slarfg_work( lapack_int n, float* alpha, float* x,\n                                lapack_int incx, float* tau );\nlapack_int LAPACKE_dlarfg_work( lapack_int n, double* alpha, double* x,\n                                lapack_int incx, double* tau );\nlapack_int LAPACKE_clarfg_work( lapack_int n, lapack_complex_float* alpha,\n                                lapack_complex_float* x, lapack_int incx,\n                                lapack_complex_float* tau );\nlapack_int LAPACKE_zlarfg_work( lapack_int n, lapack_complex_double* alpha,\n                                
lapack_complex_double* x, lapack_int incx,\n                                lapack_complex_double* tau );\n\nlapack_int LAPACKE_slarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k, const float* v,\n                                lapack_int ldv, const float* tau, float* t,\n                                lapack_int ldt );\nlapack_int LAPACKE_dlarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k, const double* v,\n                                lapack_int ldv, const double* tau, double* t,\n                                lapack_int ldt );\nlapack_int LAPACKE_clarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_float* v, lapack_int ldv,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zlarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_double* v, lapack_int ldv,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_slarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const float* v, float tau,\n                                float* c, lapack_int ldc, float* work );\nlapack_int LAPACKE_dlarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const double* v, double tau,\n                                double* c, lapack_int ldc, double* work );\nlapack_int LAPACKE_clarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const 
lapack_complex_float* v,\n                                lapack_complex_float tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zlarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const lapack_complex_double* v,\n                                lapack_complex_double tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_slarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, float* x );\nlapack_int LAPACKE_dlarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, double* x );\nlapack_int LAPACKE_clarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, lapack_complex_float* x );\nlapack_int LAPACKE_zlarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, lapack_complex_double* x );\n\nlapack_int LAPACKE_slaset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, float alpha, float beta, float* a,\n                                lapack_int lda );\nlapack_int LAPACKE_dlaset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, double alpha, double beta,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_claset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, lapack_complex_float alpha,\n                                lapack_complex_float beta,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlaset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, lapack_complex_double alpha,\n            
                    lapack_complex_double beta,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slasrt_work( char id, lapack_int n, float* d );\nlapack_int LAPACKE_dlasrt_work( char id, lapack_int n, double* d );\n\nlapack_int LAPACKE_slaswp_work( int matrix_order, lapack_int n, float* a,\n                                lapack_int lda, lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_dlaswp_work( int matrix_order, lapack_int n, double* a,\n                                lapack_int lda, lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_claswp_work( int matrix_order, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_zlaswp_work( int matrix_order, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\n\nlapack_int LAPACKE_slatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                float* d, lapack_int mode, float cond,\n                                float dmax, lapack_int kl, lapack_int ku,\n                                char pack, float* a, lapack_int lda,\n                                float* work );\nlapack_int LAPACKE_dlatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                double* d, lapack_int mode, double cond,\n                                double dmax, lapack_int kl, 
lapack_int ku,\n                                char pack, double* a, lapack_int lda,\n                                double* work );\nlapack_int LAPACKE_clatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                float* d, lapack_int mode, float cond,\n                                float dmax, lapack_int kl, lapack_int ku,\n                                char pack, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* work );\nlapack_int LAPACKE_zlatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                double* d, lapack_int mode, double cond,\n                                double dmax, lapack_int kl, lapack_int ku,\n                                char pack, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* work );\n\nlapack_int LAPACKE_slauum_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda );\nlapack_int LAPACKE_dlauum_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_clauum_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlauum_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_sopgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, const float* tau, float* q,\n                                lapack_int ldq, float* work );\nlapack_int LAPACKE_dopgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, const double* tau, double* q,\n      
                          lapack_int ldq, double* work );\n\nlapack_int LAPACKE_sopmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const float* ap, const float* tau, float* c,\n                                lapack_int ldc, float* work );\nlapack_int LAPACKE_dopmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const double* ap, const double* tau, double* c,\n                                lapack_int ldc, double* work );\n\nlapack_int LAPACKE_sorgbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k, float* a,\n                                lapack_int lda, const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k, double* a,\n                                lapack_int lda, const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                         
       lapack_int lwork );\nlapack_int LAPACKE_dorglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgtr_work( int matrix_order, char uplo, 
lapack_int n,\n                                float* a, lapack_int lda, const float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dorgtr_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, const double* tau,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, 
lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                             
   double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, 
lapack_int n,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_spbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const float* ab, lapack_int ldab,\n                                float anorm, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dpbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const double* ab,\n                                lapack_int ldab, double anorm, double* rcond,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cpbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_float* ab,\n                                lapack_int ldab, float anorm, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_double* ab,\n                                lapack_int ldab, double anorm, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const float* ab, lapack_int ldab,\n                                float* s, float* scond, float* amax );\nlapack_int LAPACKE_dpbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const double* ab,\n                                lapack_int ldab, double* s, double* scond,\n                                double* amax );\nlapack_int LAPACKE_cpbequ_work( int matrix_order, char uplo, lapack_int 
n,\n                                lapack_int kd, const lapack_complex_float* ab,\n                                lapack_int ldab, float* s, float* scond,\n                                float* amax );\nlapack_int LAPACKE_zpbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_double* ab,\n                                lapack_int ldab, double* s, double* scond,\n                                double* amax );\n\nlapack_int LAPACKE_spbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs, const float* ab,\n                                lapack_int ldab, const float* afb,\n                                lapack_int ldafb, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dpbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const double* ab, lapack_int ldab,\n                                const double* afb, lapack_int ldafb,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cpbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_complex_float* afb,\n                                lapack_int ldafb, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* ferr, float* 
berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab,\n                                const lapack_complex_double* afb,\n                                lapack_int ldafb,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, float* bb, lapack_int ldbb );\nlapack_int LAPACKE_dpbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, double* bb, lapack_int ldbb );\nlapack_int LAPACKE_cpbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, lapack_complex_float* bb,\n                                lapack_int ldbb );\nlapack_int LAPACKE_zpbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, lapack_complex_double* bb,\n                                lapack_int ldbb );\n\nlapack_int LAPACKE_spbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs, float* ab,\n                               lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs, double* ab,\n                               lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpbsv_work( int matrix_order, char uplo, lapack_int 
n,\n                               lapack_int kd, lapack_int nrhs,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                float* ab, lapack_int ldab, float* afb,\n                                lapack_int ldafb, char* equed, float* s,\n                                float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dpbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                double* ab, lapack_int ldab, double* afb,\n                                lapack_int ldafb, char* equed, double* s,\n                                double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, double* work, lapack_int* iwork );\nlapack_int LAPACKE_cpbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* afb, lapack_int ldafb,\n                                char* equed, float* s, lapack_complex_float* b,\n                                lapack_int 
ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zpbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_complex_double* afb, lapack_int ldafb,\n                                char* equed, double* s,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, float* ab, lapack_int ldab );\nlapack_int LAPACKE_dpbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, double* ab, lapack_int ldab );\nlapack_int LAPACKE_cpbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_complex_float* ab,\n                                lapack_int ldab );\nlapack_int LAPACKE_zpbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_complex_double* ab,\n                                lapack_int ldab );\n\nlapack_int LAPACKE_spbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs, const float* ab,\n                                lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n     
                           const double* ab, lapack_int ldab, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_cpbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_spftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, float* a );\nlapack_int LAPACKE_dpftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, double* a );\nlapack_int LAPACKE_cpftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, float* a );\nlapack_int LAPACKE_dpftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, double* a );\nlapack_int LAPACKE_cpftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftrs_work( int matrix_order, char transr, char uplo,\n                         
       lapack_int n, lapack_int nrhs, const float* a,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dpftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs, const double* a,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_cpftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spocon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* a, lapack_int lda, float anorm,\n                                float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dpocon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* a, lapack_int lda, double anorm,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cpocon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float anorm, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpocon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double anorm, double* rcond,\n                                lapack_complex_double* work, double* rwork 
);\n\nlapack_int LAPACKE_spoequ_work( int matrix_order, lapack_int n, const float* a,\n                                lapack_int lda, float* s, float* scond,\n                                float* amax );\nlapack_int LAPACKE_dpoequ_work( int matrix_order, lapack_int n, const double* a,\n                                lapack_int lda, double* s, double* scond,\n                                double* amax );\nlapack_int LAPACKE_cpoequ_work( int matrix_order, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequ_work( int matrix_order, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_spoequb_work( int matrix_order, lapack_int n, const float* a,\n                                 lapack_int lda, float* s, float* scond,\n                                 float* amax );\nlapack_int LAPACKE_dpoequb_work( int matrix_order, lapack_int n,\n                                 const double* a, lapack_int lda, double* s,\n                                 double* scond, double* amax );\nlapack_int LAPACKE_cpoequb_work( int matrix_order, lapack_int n,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequb_work( int matrix_order, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_sporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const float* af, lapack_int ldaf,\n                                const float* b, lapack_int 
ldb, float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const double* af,\n                                lapack_int ldaf, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const float* a,\n                                 lapack_int lda, const float* af,\n                                 
lapack_int ldaf, const float* s,\n                                 const float* b, lapack_int ldb, float* x,\n                                 lapack_int ldx, float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const double* a,\n                                 lapack_int lda, const double* af,\n                                 lapack_int ldaf, const double* s,\n                                 const double* b, lapack_int ldb, double* x,\n                                 lapack_int ldx, double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 const lapack_complex_float* af,\n                                 lapack_int ldaf, const float* s,\n                                 const lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n            
                     float* rwork );\nlapack_int LAPACKE_zporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const double* s,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_sposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* a, lapack_int lda,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* a, lapack_int lda,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_cposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dsposv_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, 
double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* work, float* swork,\n                                lapack_int* iter );\nlapack_int LAPACKE_zcposv_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, lapack_complex_double* work,\n                                lapack_complex_float* swork, double* rwork,\n                                lapack_int* iter );\n\nlapack_int LAPACKE_sposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, float* a,\n                                lapack_int lda, float* af, lapack_int ldaf,\n                                char* equed, float* s, float* b, lapack_int ldb,\n                                float* x, lapack_int ldx, float* rcond,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, double* a,\n                                lapack_int lda, double* af, lapack_int ldaf,\n                                char* equed, double* s, double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_float* a, lapack_int lda,\n                             
   lapack_complex_float* af, lapack_int ldaf,\n                                char* equed, float* s, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                char* equed, double* s,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, float* a,\n                                 lapack_int lda, float* af, lapack_int ldaf,\n                                 char* equed, float* s, float* b,\n                                 lapack_int ldb, float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, double* a,\n                                 lapack_int lda, double* af, lapack_int 
ldaf,\n                                 char* equed, double* s, double* b,\n                                 lapack_int ldb, double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 char* equed, float* s, lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params,\n                                 lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 char* equed, double* s,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n        
                         double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_spotrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda );\nlapack_int LAPACKE_dpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_cpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotri_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda );\nlapack_int LAPACKE_dpotri_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_cpotri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                
lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_sppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, float anorm, float* rcond,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, double anorm, double* rcond,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap, float anorm,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap, double anorm,\n                                double* rcond, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_sppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, float* s, float* scond,\n                                float* amax );\nlapack_int LAPACKE_dppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, double* s, double* scond,\n                                double* amax );\nlapack_int LAPACKE_cppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap, float* s,\n                                float* scond, float* amax 
);\nlapack_int LAPACKE_zppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap, double* s,\n                                double* scond, double* amax );\n\nlapack_int LAPACKE_spprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap,\n                                const float* afp, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dpprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap,\n                                const double* afp, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cpprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_complex_float* afp,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* afp,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                   
             double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* ap, float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* ap, double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_cppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* ap,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* ap,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, float* ap,\n                                float* afp, char* equed, float* s, float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, double* ap,\n                                double* afp, char* equed, double* s, double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n   
                             lapack_complex_float* ap,\n                                lapack_complex_float* afp, char* equed,\n                                float* s, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* ap,\n                                lapack_complex_double* afp, char* equed,\n                                double* s, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_spptrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap );\nlapack_int LAPACKE_dpptrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap );\nlapack_int LAPACKE_cpptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_zpptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptri_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap );\nlapack_int LAPACKE_dpptri_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap );\nlapack_int LAPACKE_cpptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap 
);\nlapack_int LAPACKE_zpptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dpptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_cpptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spstrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* piv,\n                                lapack_int* rank, float tol, float* work );\nlapack_int LAPACKE_dpstrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* piv,\n                                lapack_int* rank, double tol, double* work );\nlapack_int LAPACKE_cpstrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* piv, lapack_int* rank, float tol,\n                                float* work );\nlapack_int LAPACKE_zpstrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* piv, 
lapack_int* rank, double tol,\n                                double* work );\n\nlapack_int LAPACKE_sptcon_work( lapack_int n, const float* d, const float* e,\n                                float anorm, float* rcond, float* work );\nlapack_int LAPACKE_dptcon_work( lapack_int n, const double* d, const double* e,\n                                double anorm, double* rcond, double* work );\nlapack_int LAPACKE_cptcon_work( lapack_int n, const float* d,\n                                const lapack_complex_float* e, float anorm,\n                                float* rcond, float* work );\nlapack_int LAPACKE_zptcon_work( lapack_int n, const double* d,\n                                const lapack_complex_double* e, double anorm,\n                                double* rcond, double* work );\n\nlapack_int LAPACKE_spteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work );\nlapack_int LAPACKE_dpteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work );\nlapack_int LAPACKE_cpteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, lapack_complex_float* z,\n                                lapack_int ldz, float* work );\nlapack_int LAPACKE_zpteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, lapack_complex_double* z,\n                                lapack_int ldz, double* work );\n\nlapack_int LAPACKE_sptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                const float* d, const float* e, const float* df,\n                                const float* ef, const float* b, lapack_int ldb,\n                                float* x, lapack_int ldx, float* ferr,\n                    
            float* berr, float* work );\nlapack_int LAPACKE_dptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                const double* d, const double* e,\n                                const double* df, const double* ef,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work );\nlapack_int LAPACKE_cptrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* d,\n                                const lapack_complex_float* e, const float* df,\n                                const lapack_complex_float* ef,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zptrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* d,\n                                const lapack_complex_double* e,\n                                const double* df,\n                                const lapack_complex_double* ef,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* d, float* e, float* b, lapack_int ldb );\nlapack_int LAPACKE_dptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* d, double* e, double* b,\n                              
 lapack_int ldb );\nlapack_int LAPACKE_cptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* d, lapack_complex_float* e,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* d, lapack_complex_double* e,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sptsvx_work( int matrix_order, char fact, lapack_int n,\n                                lapack_int nrhs, const float* d, const float* e,\n                                float* df, float* ef, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work );\nlapack_int LAPACKE_dptsvx_work( int matrix_order, char fact, lapack_int n,\n                                lapack_int nrhs, const double* d,\n                                const double* e, double* df, double* ef,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, double* work );\nlapack_int LAPACKE_cptsvx_work( int matrix_order, char fact, lapack_int n,\n                                lapack_int nrhs, const float* d,\n                                const lapack_complex_float* e, float* df,\n                                lapack_complex_float* ef,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zptsvx_work( int matrix_order, char fact, lapack_int n,\n             
                   lapack_int nrhs, const double* d,\n                                const lapack_complex_double* e, double* df,\n                                lapack_complex_double* ef,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spttrf_work( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dpttrf_work( lapack_int n, double* d, double* e );\nlapack_int LAPACKE_cpttrf_work( lapack_int n, float* d,\n                                lapack_complex_float* e );\nlapack_int LAPACKE_zpttrf_work( lapack_int n, double* d,\n                                lapack_complex_double* e );\n\nlapack_int LAPACKE_spttrs_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                const float* d, const float* e, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dpttrs_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                const double* d, const double* e, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_cpttrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* d,\n                                const lapack_complex_float* e,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpttrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* d,\n                                const lapack_complex_double* e,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssbev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int 
kd, float* ab,\n                               lapack_int ldab, float* w, float* z,\n                               lapack_int ldz, float* work );\nlapack_int LAPACKE_dsbev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int kd, double* ab,\n                               lapack_int ldab, double* w, double* z,\n                               lapack_int ldz, double* work );\n\nlapack_int LAPACKE_ssbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd, float* ab,\n                                lapack_int ldab, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd, double* ab,\n                                lapack_int ldab, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                float* ab, lapack_int ldab, float* q,\n                                lapack_int ldq, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dsbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                double* ab, lapack_int ldab, double* q,\n                                
lapack_int ldq, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_ssbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                float* ab, lapack_int ldab, const float* bb,\n                                lapack_int ldbb, float* x, lapack_int ldx,\n                                float* work );\nlapack_int LAPACKE_dsbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                double* ab, lapack_int ldab, const double* bb,\n                                lapack_int ldbb, double* x, lapack_int ldx,\n                                double* work );\n\nlapack_int LAPACKE_ssbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               float* ab, lapack_int ldab, float* bb,\n                               lapack_int ldbb, float* w, float* z,\n                               lapack_int ldz, float* work );\nlapack_int LAPACKE_dsbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               double* ab, lapack_int ldab, double* bb,\n                               lapack_int ldbb, double* w, double* z,\n                               lapack_int ldz, double* work );\n\nlapack_int LAPACKE_ssbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                float* ab, lapack_int ldab, float* bb,\n                                
lapack_int ldbb, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                double* ab, lapack_int ldab, double* bb,\n                                lapack_int ldbb, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, float* ab, lapack_int ldab,\n                                float* bb, lapack_int ldbb, float* q,\n                                lapack_int ldq, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dsbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, double* ab, lapack_int ldab,\n                                double* bb, lapack_int ldbb, double* q,\n                                lapack_int ldq, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_ssbtrd_work( int matrix_order, char 
vect, char uplo,\n                                lapack_int n, lapack_int kd, float* ab,\n                                lapack_int ldab, float* d, float* e, float* q,\n                                lapack_int ldq, float* work );\nlapack_int LAPACKE_dsbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd, double* ab,\n                                lapack_int ldab, double* d, double* e,\n                                double* q, lapack_int ldq, double* work );\n\nlapack_int LAPACKE_ssfrk_work( int matrix_order, char transr, char uplo,\n                               char trans, lapack_int n, lapack_int k,\n                               float alpha, const float* a, lapack_int lda,\n                               float beta, float* c );\nlapack_int LAPACKE_dsfrk_work( int matrix_order, char transr, char uplo,\n                               char trans, lapack_int n, lapack_int k,\n                               double alpha, const double* a, lapack_int lda,\n                               double beta, double* c );\n\nlapack_int LAPACKE_sspcon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, const lapack_int* ipiv,\n                                float anorm, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dspcon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, const lapack_int* ipiv,\n                                double anorm, double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cspcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zspcon_work( int matrix_order, char 
uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_sspev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, float* ap, float* w, float* z,\n                               lapack_int ldz, float* work );\nlapack_int LAPACKE_dspev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, double* ap, double* w, double* z,\n                               lapack_int ldz, double* work );\n\nlapack_int LAPACKE_sspevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, float* ap, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dspevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, double* ap, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sspevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, float* ap, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dspevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, double* ap, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double 
abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, double* work,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_sspgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, float* ap, const float* bp );\nlapack_int LAPACKE_dspgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, double* ap, const double* bp );\n\nlapack_int LAPACKE_sspgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, float* ap, float* bp,\n                               float* w, float* z, lapack_int ldz,\n                               float* work );\nlapack_int LAPACKE_dspgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, double* ap, double* bp,\n                               double* w, double* z, lapack_int ldz,\n                               double* work );\n\nlapack_int LAPACKE_sspgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, float* ap, float* bp,\n                                float* w, float* z, lapack_int ldz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dspgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, double* ap, double* bp,\n                                double* w, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sspgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, float* ap,\n                                float* bp, 
float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, float* z, lapack_int ldz, float* work,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_dspgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, double* ap,\n                                double* bp, double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, double* z, lapack_int ldz,\n                                double* work, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_ssprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap,\n                                const float* afp, const lapack_int* ipiv,\n                                const float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dsprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap,\n                                const double* afp, const lapack_int* ipiv,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_csprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_complex_float* afp,\n                                const lapack_int* ipiv,\n                                const 
lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zsprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* afp,\n                                const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* ap, lapack_int* ipiv,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* ap, lapack_int* ipiv,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_cspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* ap,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* ap,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\n\nlapack_int LAPACKE_sspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const float* 
ap,\n                                float* afp, lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const double* ap,\n                                double* afp, lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* afp, lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* afp, lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_ssptrd_work( int matrix_order, char uplo, 
lapack_int n,\n                                float* ap, float* d, float* e, float* tau );\nlapack_int LAPACKE_dsptrd_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap, double* d, double* e, double* tau );\n\nlapack_int LAPACKE_ssptrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_dsptrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap, lapack_int* ipiv );\nlapack_int LAPACKE_csptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zsptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_ssptri_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap, const lapack_int* ipiv,\n                                float* work );\nlapack_int LAPACKE_dsptri_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap, const lapack_int* ipiv,\n                                double* work );\nlapack_int LAPACKE_csptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zsptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_ssptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int 
ldb );\nlapack_int LAPACKE_dsptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap,\n                                const lapack_int* ipiv, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_csptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zsptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sstebz_work( char range, char order, lapack_int n, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, const float* d, const float* e,\n                                lapack_int* m, lapack_int* nsplit, float* w,\n                                lapack_int* iblock, lapack_int* isplit,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dstebz_work( char range, char order, lapack_int n, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, const double* d, const double* e,\n                                lapack_int* m, lapack_int* nsplit, double* w,\n                                lapack_int* iblock, lapack_int* isplit,\n                                double* work, lapack_int* iwork );\n\nlapack_int LAPACKE_sstedc_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* 
work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dstedc_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_cstedc_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zstedc_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_sstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n              
                  double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, lapack_int* isuppz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_cstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int* isuppz, double* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_sstein_work( int matrix_order, lapack_int n, const float* d,\n                                const float* e, lapack_int m, const float* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifailv );\nlapack_int LAPACKE_dstein_work( int matrix_order, lapack_int n, const double* d,\n                                const double* e, lapack_int m, const double* w,\n     
                           const lapack_int* iblock,\n                                const lapack_int* isplit, double* z,\n                                lapack_int ldz, double* work, lapack_int* iwork,\n                                lapack_int* ifailv );\nlapack_int LAPACKE_cstein_work( int matrix_order, lapack_int n, const float* d,\n                                const float* e, lapack_int m, const float* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit,\n                                lapack_complex_float* z, lapack_int ldz,\n                                float* work, lapack_int* iwork,\n                                lapack_int* ifailv );\nlapack_int LAPACKE_zstein_work( int matrix_order, lapack_int n, const double* d,\n                                const double* e, lapack_int m, const double* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit,\n                                lapack_complex_double* z, lapack_int ldz,\n                                double* work, lapack_int* iwork,\n                                lapack_int* ifailv );\n\nlapack_int LAPACKE_sstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int nzc,\n                                lapack_int* isuppz, lapack_logical* tryrac,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, 
lapack_int iu,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, lapack_int nzc,\n                                lapack_int* isuppz, lapack_logical* tryrac,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_cstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int nzc, lapack_int* isuppz,\n                                lapack_logical* tryrac, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int nzc, lapack_int* isuppz,\n                                lapack_logical* tryrac, double* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_ssteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work );\nlapack_int LAPACKE_dsteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                        
        double* work );\nlapack_int LAPACKE_csteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, lapack_complex_float* z,\n                                lapack_int ldz, float* work );\nlapack_int LAPACKE_zsteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, lapack_complex_double* z,\n                                lapack_int ldz, double* work );\n\nlapack_int LAPACKE_ssterf_work( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dsterf_work( lapack_int n, double* d, double* e );\n\nlapack_int LAPACKE_sstev_work( int matrix_order, char jobz, lapack_int n,\n                               float* d, float* e, float* z, lapack_int ldz,\n                               float* work );\nlapack_int LAPACKE_dstev_work( int matrix_order, char jobz, lapack_int n,\n                               double* d, double* e, double* z, lapack_int ldz,\n                               double* work );\n\nlapack_int LAPACKE_sstevd_work( int matrix_order, char jobz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dstevd_work( int matrix_order, char jobz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sstevr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int* isuppz, float* work,\n     
                           lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dstevr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, lapack_int* isuppz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sstevx_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dstevx_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, double* work,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_ssycon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* a, lapack_int lda,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dsycon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* a, lapack_int lda,\n                                
const lapack_int* ipiv, double anorm,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_csycon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zsycon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_ssyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const float* a, lapack_int lda, float* s,\n                                 float* scond, float* amax, float* work );\nlapack_int LAPACKE_dsyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const double* a, lapack_int lda, double* s,\n                                 double* scond, double* amax, double* work );\nlapack_int LAPACKE_csyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 float* s, float* scond, float* amax,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_zsyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* s, double* scond, double* amax,\n                                 lapack_complex_double* work );\n\nlapack_int LAPACKE_ssyev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, float* a, lapack_int lda, float* w,\n                       
        float* work, lapack_int lwork );\nlapack_int LAPACKE_dsyev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* w, double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssyevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* w, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsyevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* w, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssyevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, float* a,\n                                lapack_int lda, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dsyevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, double* a,\n                                lapack_int lda, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, lapack_int* isuppz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork 
);\n\nlapack_int LAPACKE_ssyevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, float* a,\n                                lapack_int lda, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_dsyevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, double* a,\n                                lapack_int lda, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_ssygst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, float* a, lapack_int lda,\n                                const float* b, lapack_int ldb );\nlapack_int LAPACKE_dsygst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, double* a, lapack_int lda,\n                                const double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssygv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, float* a,\n                               lapack_int lda, float* b, lapack_int ldb,\n                               float* w, float* work, lapack_int lwork );\nlapack_int LAPACKE_dsygv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, double* a,\n                               lapack_int lda, double* b, lapack_int ldb,\n      
                         double* w, double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssygvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float* w, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsygvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* w, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssygvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, float* z, lapack_int ldz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dsygvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int* ifail 
);\n\nlapack_int LAPACKE_ssyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const float* af, lapack_int ldaf,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dsyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_csyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zsyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_double* b, 
lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_ssyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const float* a,\n                                 lapack_int lda, const float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* s, const float* b, lapack_int ldb,\n                                 float* x, lapack_int ldx, float* rcond,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dsyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const double* a,\n                                 lapack_int lda, const double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* s, const double* b,\n                                 lapack_int ldb, double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_csyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                      
           const lapack_complex_float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* s, const lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zsyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* s,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_ssysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* a, lapack_int lda,\n                               lapack_int* ipiv, float* b, lapack_int ldb,\n                               float* work, lapack_int lwork );\nlapack_int LAPACKE_dsysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* a, lapack_int 
lda,\n                               lapack_int* ipiv, double* b, lapack_int ldb,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_csysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zsysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, float* a, lapack_int lda,\n                                    lapack_int* ipiv, float* b, lapack_int ldb,\n                                    float* work, lapack_int lwork );\nlapack_int LAPACKE_dsysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, double* a, lapack_int lda,\n                                    lapack_int* ipiv, double* b, lapack_int ldb,\n                                    double* work, lapack_int lwork );\nlapack_int LAPACKE_csysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, lapack_complex_float* a,\n                                    lapack_int lda, lapack_int* ipiv,\n                                    lapack_complex_float* b, lapack_int ldb,\n                                    lapack_complex_float* work,\n                                    lapack_int lwork );\nlapack_int LAPACKE_zsysv_rook_work( int matrix_order, 
char uplo, lapack_int n,\n                                    lapack_int nrhs, lapack_complex_double* a,\n                                    lapack_int lda, lapack_int* ipiv,\n                                    lapack_complex_double* b, lapack_int ldb,\n                                    lapack_complex_double* work,\n                                    lapack_int lwork );\n\nlapack_int LAPACKE_ssysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const float* a,\n                                lapack_int lda, float* af, lapack_int ldaf,\n                                lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dsysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const double* a,\n                                lapack_int lda, double* af, lapack_int ldaf,\n                                lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_csysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                lapack_int* ipiv, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                
lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zsysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_ssysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, float* a,\n                                 lapack_int lda, float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* s,\n                                 float* b, lapack_int ldb, float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dsysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, double* a,\n                                 lapack_int lda, double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* s,\n                                 double* b, lapack_int ldb, 
double* x,\n                                 lapack_int ldx, double* rcond, double* rpvgrw,\n                                 double* berr, lapack_int n_err_bnds,\n                                 double* err_bnds_norm, double* err_bnds_comp,\n                                 lapack_int nparams, double* params,\n                                 double* work, lapack_int* iwork );\nlapack_int LAPACKE_csysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* s,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zsysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* s,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, 
lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_ssytrd_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, float* d, float* e,\n                                float* tau, float* work, lapack_int lwork );\nlapack_int LAPACKE_dsytrd_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, double* d, double* e,\n                                double* tau, double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssytrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dsytrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_csytrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_zsytrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_ssytri_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda,\n                                const lapack_int* ipiv, float* work );\nlapack_int LAPACKE_dsytri_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda,\n                       
         const lapack_int* ipiv, double* work );\nlapack_int LAPACKE_csytri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zsytri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_ssytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dsytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_csytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zsytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const float* ab, lapack_int ldab, float* rcond,\n         
                       float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const double* ab, lapack_int ldab,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_ztbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const float* ab,\n                                lapack_int ldab, const float* b, lapack_int ldb,\n                                const float* x, lapack_int ldx, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const double* ab,\n                                lapack_int ldab, const double* b,\n                                lapack_int ldb, const double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n         
                       lapack_int* iwork );\nlapack_int LAPACKE_ctbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const lapack_complex_float* ab,\n                                lapack_int ldab, const lapack_complex_float* b,\n                                lapack_int ldb, const lapack_complex_float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, const lapack_complex_double* b,\n                                lapack_int ldb, const lapack_complex_double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const float* ab,\n                                lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const double* ab,\n                                lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const lapack_complex_float* ab,\n              
                  lapack_int ldab, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_ztbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_stfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, float alpha, const float* a,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dtfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, double alpha, const double* a,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_ctfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, lapack_complex_float alpha,\n                               const lapack_complex_float* a,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, lapack_complex_double alpha,\n                               const lapack_complex_double* a,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n, float* a );\nlapack_int LAPACKE_dtftri_work( 
int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n, double* a );\nlapack_int LAPACKE_ctftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n,\n                                lapack_complex_float* a );\nlapack_int LAPACKE_ztftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n,\n                                lapack_complex_double* a );\n\nlapack_int LAPACKE_stfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* arf, float* ap );\nlapack_int LAPACKE_dtfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* arf, double* ap );\nlapack_int LAPACKE_ctfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* arf,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_ztfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* arf,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_stfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* arf, float* a,\n                                lapack_int lda );\nlapack_int LAPACKE_dtfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* arf, double* a,\n                                lapack_int lda );\nlapack_int LAPACKE_ctfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* arf,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int 
n, const lapack_complex_double* arf,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_stgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const float* s, lapack_int lds, const float* p,\n                                lapack_int ldp, float* vl, lapack_int ldvl,\n                                float* vr, lapack_int ldvr, lapack_int mm,\n                                lapack_int* m, float* work );\nlapack_int LAPACKE_dtgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const double* s, lapack_int lds,\n                                const double* p, lapack_int ldp, double* vl,\n                                lapack_int ldvl, double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, double* work );\nlapack_int LAPACKE_ctgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_float* s, lapack_int lds,\n                                const lapack_complex_float* p, lapack_int ldp,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_double* s, lapack_int lds,\n                                const lapack_complex_double* p, lapack_int ldp,\n                                lapack_complex_double* vl, 
lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float* q, lapack_int ldq, float* z,\n                                lapack_int ldz, lapack_int* ifst,\n                                lapack_int* ilst, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dtgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* q, lapack_int ldq, double* z,\n                                lapack_int ldz, lapack_int* ifst,\n                                lapack_int* ilst, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_ctgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int 
ldb,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_stgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float* alphar, float* alphai,\n                                float* beta, float* q, lapack_int ldq, float* z,\n                                lapack_int ldz, lapack_int* m, float* pl,\n                                float* pr, float* dif, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dtgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double* alphar, double* alphai,\n                                double* beta, double* q, lapack_int ldq,\n                                double* z, lapack_int ldz, lapack_int* m,\n                                double* pl, double* pr, double* dif,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_ctgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                          
      lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int* m, float* pl, float* pr, float* dif,\n                                lapack_complex_float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_ztgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int* m, double* pl, double* pr,\n                                double* dif, lapack_complex_double* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_stgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float tola, float tolb,\n                                float* alpha, float* beta, float* u,\n                                lapack_int ldu, float* v, lapack_int ldv,\n                    
            float* q, lapack_int ldq, float* work,\n                                lapack_int* ncycle );\nlapack_int LAPACKE_dtgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double tola, double tolb,\n                                double* alpha, double* beta, double* u,\n                                lapack_int ldu, double* v, lapack_int ldv,\n                                double* q, lapack_int ldq, double* work,\n                                lapack_int* ncycle );\nlapack_int LAPACKE_ctgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float tola, float tolb, float* alpha,\n                                float* beta, lapack_complex_float* u,\n                                lapack_int ldu, lapack_complex_float* v,\n                                lapack_int ldv, lapack_complex_float* q,\n                                lapack_int ldq, lapack_complex_float* work,\n                                lapack_int* ncycle );\nlapack_int LAPACKE_ztgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double tola, double tolb, double* alpha,\n               
                 double* beta, lapack_complex_double* u,\n                                lapack_int ldu, lapack_complex_double* v,\n                                lapack_int ldv, lapack_complex_double* q,\n                                lapack_int ldq, lapack_complex_double* work,\n                                lapack_int* ncycle );\n\nlapack_int LAPACKE_stgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const float* a, lapack_int lda, const float* b,\n                                lapack_int ldb, const float* vl,\n                                lapack_int ldvl, const float* vr,\n                                lapack_int ldvr, float* s, float* dif,\n                                lapack_int mm, lapack_int* m, float* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_dtgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const double* a, lapack_int lda,\n                                const double* b, lapack_int ldb,\n                                const double* vl, lapack_int ldvl,\n                                const double* vr, lapack_int ldvr, double* s,\n                                double* dif, lapack_int mm, lapack_int* m,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                const lapack_complex_float* vl, lapack_int ldvl,\n                                const 
lapack_complex_float* vr, lapack_int ldvr,\n                                float* s, float* dif, lapack_int mm,\n                                lapack_int* m, lapack_complex_float* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_ztgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                const lapack_complex_double* vl,\n                                lapack_int ldvl,\n                                const lapack_complex_double* vr,\n                                lapack_int ldvr, double* s, double* dif,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, lapack_int lwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_stgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n, const float* a,\n                                lapack_int lda, const float* b, lapack_int ldb,\n                                float* c, lapack_int ldc, const float* d,\n                                lapack_int ldd, const float* e, lapack_int lde,\n                                float* f, lapack_int ldf, float* scale,\n                                float* dif, float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dtgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n, const double* a,\n                                lapack_int lda, const double* b, lapack_int ldb,\n                                double* c, lapack_int ldc, const double* d,\n                                lapack_int ldd, const 
double* e, lapack_int lde,\n                                double* f, lapack_int ldf, double* scale,\n                                double* dif, double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* c, lapack_int ldc,\n                                const lapack_complex_float* d, lapack_int ldd,\n                                const lapack_complex_float* e, lapack_int lde,\n                                lapack_complex_float* f, lapack_int ldf,\n                                float* scale, float* dif,\n                                lapack_complex_float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ztgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* c, lapack_int ldc,\n                                const lapack_complex_double* d, lapack_int ldd,\n                                const lapack_complex_double* e, lapack_int lde,\n                                lapack_complex_double* f, lapack_int ldf,\n                                double* scale, double* dif,\n                                lapack_complex_double* work, lapack_int lwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_stpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const float* ap,\n    
                            float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const double* ap,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_float* ap, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_double* ap, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* ap, const float* b, lapack_int ldb,\n                                const float* x, lapack_int ldx, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* ap, const double* b,\n                                lapack_int ldb, const double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                const 
lapack_complex_float* b, lapack_int ldb,\n                                const lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                const lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, float* ap );\nlapack_int LAPACKE_dtptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, double* ap );\nlapack_int LAPACKE_ctptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_float* ap );\nlapack_int LAPACKE_ztptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_double* ap );\n\nlapack_int LAPACKE_stptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* ap, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* ap, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const 
lapack_complex_float* ap,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* ap, float* arf );\nlapack_int LAPACKE_dtpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* ap, double* arf );\nlapack_int LAPACKE_ctpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* ap,\n                                lapack_complex_float* arf );\nlapack_int LAPACKE_ztpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* ap,\n                                lapack_complex_double* arf );\n\nlapack_int LAPACKE_stpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, float* a, lapack_int lda );\nlapack_int LAPACKE_dtpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, double* a, lapack_int lda );\nlapack_int LAPACKE_ctpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_strcon_work( int matrix_order, char norm, char uplo,\n                                char 
diag, lapack_int n, const float* a,\n                                lapack_int lda, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dtrcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const double* a,\n                                lapack_int lda, double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctrcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_ztrcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double* rcond, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_strevc_work( int matrix_order, char side, char howmny,\n                                lapack_logical* select, lapack_int n,\n                                const float* t, lapack_int ldt, float* vl,\n                                lapack_int ldvl, float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, float* work );\nlapack_int LAPACKE_dtrevc_work( int matrix_order, char side, char howmny,\n                                lapack_logical* select, lapack_int n,\n                                const double* t, lapack_int ldt, double* vl,\n                                lapack_int ldvl, double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, double* work );\nlapack_int LAPACKE_ctrevc_work( int matrix_order, char side, char howmny,\n                                const 
lapack_logical* select, lapack_int n,\n                                lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztrevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_strexc_work( int matrix_order, char compq, lapack_int n,\n                                float* t, lapack_int ldt, float* q,\n                                lapack_int ldq, lapack_int* ifst,\n                                lapack_int* ilst, float* work );\nlapack_int LAPACKE_dtrexc_work( int matrix_order, char compq, lapack_int n,\n                                double* t, lapack_int ldt, double* q,\n                                lapack_int ldq, lapack_int* ifst,\n                                lapack_int* ilst, double* work );\nlapack_int LAPACKE_ctrexc_work( int matrix_order, char compq, lapack_int n,\n                                lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztrexc_work( int matrix_order, char compq, lapack_int n,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* q, 
lapack_int ldq,\n                                lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_strrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* a, lapack_int lda, const float* b,\n                                lapack_int ldb, const float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dtrrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* a, lapack_int lda,\n                                const double* b, lapack_int ldb,\n                                const double* x, lapack_int ldx, double* ferr,\n                                double* berr, double* work, lapack_int* iwork );\nlapack_int LAPACKE_ctrrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                const lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztrrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                const lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                
lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_strsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                float* t, lapack_int ldt, float* q,\n                                lapack_int ldq, float* wr, float* wi,\n                                lapack_int* m, float* s, float* sep,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dtrsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                double* t, lapack_int ldt, double* q,\n                                lapack_int ldq, double* wr, double* wi,\n                                lapack_int* m, double* s, double* sep,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_ctrsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* w, lapack_int* m,\n                                float* s, float* sep,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_ztrsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* w, lapack_int* m,\n                                double* s, double* sep,\n            
                    lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_strsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const float* t, lapack_int ldt, const float* vl,\n                                lapack_int ldvl, const float* vr,\n                                lapack_int ldvr, float* s, float* sep,\n                                lapack_int mm, lapack_int* m, float* work,\n                                lapack_int ldwork, lapack_int* iwork );\nlapack_int LAPACKE_dtrsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const double* t, lapack_int ldt,\n                                const double* vl, lapack_int ldvl,\n                                const double* vr, lapack_int ldvr, double* s,\n                                double* sep, lapack_int mm, lapack_int* m,\n                                double* work, lapack_int ldwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctrsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_float* t, lapack_int ldt,\n                                const lapack_complex_float* vl, lapack_int ldvl,\n                                const lapack_complex_float* vr, lapack_int ldvr,\n                                float* s, float* sep, lapack_int mm,\n                                lapack_int* m, lapack_complex_float* work,\n                                lapack_int ldwork, float* rwork );\nlapack_int LAPACKE_ztrsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_double* t, lapack_int ldt,\n      
                          const lapack_complex_double* vl,\n                                lapack_int ldvl,\n                                const lapack_complex_double* vr,\n                                lapack_int ldvr, double* s, double* sep,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, lapack_int ldwork,\n                                double* rwork );\n\nlapack_int LAPACKE_strsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const float* a, lapack_int lda, const float* b,\n                                lapack_int ldb, float* c, lapack_int ldc,\n                                float* scale );\nlapack_int LAPACKE_dtrsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda,\n                                const double* b, lapack_int ldb, double* c,\n                                lapack_int ldc, double* scale );\nlapack_int LAPACKE_ctrsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* c, lapack_int ldc,\n                                float* scale );\nlapack_int LAPACKE_ztrsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* c, lapack_int ldc,\n        
                        double* scale );\n\nlapack_int LAPACKE_strtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, float* a, lapack_int lda );\nlapack_int LAPACKE_dtrtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, double* a, lapack_int lda );\nlapack_int LAPACKE_ctrtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda );\nlapack_int LAPACKE_ztrtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda );\n\nlapack_int LAPACKE_strtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* a, lapack_int lda, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dtrtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* a, lapack_int lda, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_ctrtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztrtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_strttf_work( int matrix_order, char transr, char uplo,\n                       
         lapack_int n, const float* a, lapack_int lda,\n                                float* arf );\nlapack_int LAPACKE_dtrttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* arf );\nlapack_int LAPACKE_ctrttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* arf );\nlapack_int LAPACKE_ztrttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* arf );\n\nlapack_int LAPACKE_strttp_work( int matrix_order, char uplo, lapack_int n,\n                                const float* a, lapack_int lda, float* ap );\nlapack_int LAPACKE_dtrttp_work( int matrix_order, char uplo, lapack_int n,\n                                const double* a, lapack_int lda, double* ap );\nlapack_int LAPACKE_ctrttp_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_ztrttp_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_stzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dtzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_ctzrzf_work( int 
matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_ztzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunglq_work( int 
matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungrq_work( int 
matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungtr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungtr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const lapack_complex_double* a,\n                                lapack_int lda,\n                                
const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n         
                       lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmrq_work( int matrix_order, char side, char trans,\n                                
lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n       
                         const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cupgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zupgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_cupmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zupmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_double* ap,\n             
                   const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_claghe( int matrix_order, lapack_int n, lapack_int k,\n                           const float* d, lapack_complex_float* a,\n                           lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_zlaghe( int matrix_order, lapack_int n, lapack_int k,\n                           const double* d, lapack_complex_double* a,\n                           lapack_int lda, lapack_int* iseed );\n\nlapack_int LAPACKE_slagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const float* d, float* a, lapack_int lda,\n                           lapack_int* iseed );\nlapack_int LAPACKE_dlagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const double* d, double* a, lapack_int lda,\n                           lapack_int* iseed );\nlapack_int LAPACKE_clagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const float* d, lapack_complex_float* a,\n                           lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_zlagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const double* d, lapack_complex_double* a,\n                           lapack_int lda, lapack_int* iseed );\n\nlapack_int LAPACKE_slapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, float* x, lapack_int ldx,\n                           lapack_int* k );\nlapack_int LAPACKE_dlapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, double* x,\n                           lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_clapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, lapack_complex_float* x,\n                           
lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_zlapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, lapack_complex_double* x,\n                           lapack_int ldx, lapack_int* k );\n\n\nfloat LAPACKE_slapy2( float x, float y );\ndouble LAPACKE_dlapy2( double x, double y );\n\nfloat LAPACKE_slapy3( float x, float y, float z );\ndouble LAPACKE_dlapy3( double x, double y, double z );\n\nlapack_int LAPACKE_slartgp( float f, float g, float* cs, float* sn, float* r );\nlapack_int LAPACKE_dlartgp( double f, double g, double* cs, double* sn,\n                            double* r );\n\nlapack_int LAPACKE_slartgs( float x, float y, float sigma, float* cs,\n                            float* sn );\nlapack_int LAPACKE_dlartgs( double x, double y, double sigma, double* cs,\n                            double* sn );\n\n\n//LAPACK 3.3.0\nlapack_int LAPACKE_cbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, float* theta, float* phi,\n                           lapack_complex_float* u1, lapack_int ldu1,\n                           lapack_complex_float* u2, lapack_int ldu2,\n                           lapack_complex_float* v1t, lapack_int ldv1t,\n                           lapack_complex_float* v2t, lapack_int ldv2t,\n                           float* b11d, float* b11e, float* b12d, float* b12e,\n                           float* b21d, float* b21e, float* b22d, float* b22e );\nlapack_int LAPACKE_cbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                float* theta, float* phi,\n                                lapack_complex_float* u1, lapack_int ldu1,\n                                lapack_complex_float* u2, 
lapack_int ldu2,\n                                lapack_complex_float* v1t, lapack_int ldv1t,\n                                lapack_complex_float* v2t, lapack_int ldv2t,\n                                float* b11d, float* b11e, float* b12d,\n                                float* b12e, float* b21d, float* b21e,\n                                float* b22d, float* b22e, float* rwork,\n                                lapack_int lrwork );\nlapack_int LAPACKE_cheswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_cheswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_chetri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_chetri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_chetri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_chetri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_float* work, lapack_int nb );\nlapack_int LAPACKE_chetrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_float* a,\n              
              lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_chetrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_csyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_csyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, lapack_complex_float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_csyswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_csyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_csytri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_csytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_csytri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* 
a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_csytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_float* work, lapack_int nb );\nlapack_int LAPACKE_csytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_float* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_csytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_cunbdb( int matrix_order, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_float* x11, lapack_int ldx11,\n                           lapack_complex_float* x12, lapack_int ldx12,\n                           lapack_complex_float* x21, lapack_int ldx21,\n                           lapack_complex_float* x22, lapack_int ldx22,\n                           float* theta, float* phi,\n                           lapack_complex_float* taup1,\n                           lapack_complex_float* taup2,\n                           lapack_complex_float* tauq1,\n                           lapack_complex_float* tauq2 );\nlapack_int LAPACKE_cunbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                lapack_complex_float* x11, lapack_int ldx11,\n          
                      lapack_complex_float* x12, lapack_int ldx12,\n                                lapack_complex_float* x21, lapack_int ldx21,\n                                lapack_complex_float* x22, lapack_int ldx22,\n                                float* theta, float* phi,\n                                lapack_complex_float* taup1,\n                                lapack_complex_float* taup2,\n                                lapack_complex_float* tauq1,\n                                lapack_complex_float* tauq2,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_cuncsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_float* x11, lapack_int ldx11,\n                           lapack_complex_float* x12, lapack_int ldx12,\n                           lapack_complex_float* x21, lapack_int ldx21,\n                           lapack_complex_float* x22, lapack_int ldx22,\n                           float* theta, lapack_complex_float* u1,\n                           lapack_int ldu1, lapack_complex_float* u2,\n                           lapack_int ldu2, lapack_complex_float* v1t,\n                           lapack_int ldv1t, lapack_complex_float* v2t,\n                           lapack_int ldv2t );\nlapack_int LAPACKE_cuncsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, lapack_complex_float* x11,\n                                lapack_int ldx11, lapack_complex_float* x12,\n                                lapack_int ldx12, lapack_complex_float* x21,\n                                lapack_int ldx21, lapack_complex_float* x22,\n                   
             lapack_int ldx22, float* theta,\n                                lapack_complex_float* u1, lapack_int ldu1,\n                                lapack_complex_float* u2, lapack_int ldu2,\n                                lapack_complex_float* v1t, lapack_int ldv1t,\n                                lapack_complex_float* v2t, lapack_int ldv2t,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, double* theta,\n                           double* phi, double* u1, lapack_int ldu1, double* u2,\n                           lapack_int ldu2, double* v1t, lapack_int ldv1t,\n                           double* v2t, lapack_int ldv2t, double* b11d,\n                           double* b11e, double* b12d, double* b12e,\n                           double* b21d, double* b21e, double* b22d,\n                           double* b22e );\nlapack_int LAPACKE_dbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                double* theta, double* phi, double* u1,\n                                lapack_int ldu1, double* u2, lapack_int ldu2,\n                                double* v1t, lapack_int ldv1t, double* v2t,\n                                lapack_int ldv2t, double* b11d, double* b11e,\n                                double* b12d, double* b12e, double* b21d,\n                                double* b21e, double* b22d, double* b22e,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_dorbdb( int matrix_order, char trans, char 
signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           double* x11, lapack_int ldx11, double* x12,\n                           lapack_int ldx12, double* x21, lapack_int ldx21,\n                           double* x22, lapack_int ldx22, double* theta,\n                           double* phi, double* taup1, double* taup2,\n                           double* tauq1, double* tauq2 );\nlapack_int LAPACKE_dorbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                double* x11, lapack_int ldx11, double* x12,\n                                lapack_int ldx12, double* x21, lapack_int ldx21,\n                                double* x22, lapack_int ldx22, double* theta,\n                                double* phi, double* taup1, double* taup2,\n                                double* tauq1, double* tauq2, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           double* x11, lapack_int ldx11, double* x12,\n                           lapack_int ldx12, double* x21, lapack_int ldx21,\n                           double* x22, lapack_int ldx22, double* theta,\n                           double* u1, lapack_int ldu1, double* u2,\n                           lapack_int ldu2, double* v1t, lapack_int ldv1t,\n                           double* v2t, lapack_int ldv2t );\nlapack_int LAPACKE_dorcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, double* x11, lapack_int ldx11,\n                              
  double* x12, lapack_int ldx12, double* x21,\n                                lapack_int ldx21, double* x22, lapack_int ldx22,\n                                double* theta, double* u1, lapack_int ldu1,\n                                double* u2, lapack_int ldu2, double* v1t,\n                                lapack_int ldv1t, double* v2t, lapack_int ldv2t,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dsyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            double* a, lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dsyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, double* a, lapack_int lda,\n                                 const lapack_int* ipiv, double* work );\nlapack_int LAPACKE_dsyswapr( int matrix_order, char uplo, lapack_int n,\n                             double* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_dsyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  double* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_dsytri2( int matrix_order, char uplo, lapack_int n,\n                            double* a, lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dsytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 double* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_dsytri2x( int matrix_order, char uplo, lapack_int n,\n                             double* a, lapack_int lda, const lapack_int* ipiv,\n                             lapack_int nb );\nlapack_int LAPACKE_dsytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  double* a, lapack_int lda,\n                                  const lapack_int* 
ipiv, double* work,\n                                  lapack_int nb );\nlapack_int LAPACKE_dsytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const double* a, lapack_int lda,\n                            const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_dsytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 double* b, lapack_int ldb, double* work );\nlapack_int LAPACKE_sbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, float* theta, float* phi,\n                           float* u1, lapack_int ldu1, float* u2,\n                           lapack_int ldu2, float* v1t, lapack_int ldv1t,\n                           float* v2t, lapack_int ldv2t, float* b11d,\n                           float* b11e, float* b12d, float* b12e, float* b21d,\n                           float* b21e, float* b22d, float* b22e );\nlapack_int LAPACKE_sbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                float* theta, float* phi, float* u1,\n                                lapack_int ldu1, float* u2, lapack_int ldu2,\n                                float* v1t, lapack_int ldv1t, float* v2t,\n                                lapack_int ldv2t, float* b11d, float* b11e,\n                                float* b12d, float* b12e, float* b21d,\n                                float* b21e, float* b22d, float* b22e,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_sorbdb( int matrix_order, char trans, char signs,\n   
                        lapack_int m, lapack_int p, lapack_int q, float* x11,\n                           lapack_int ldx11, float* x12, lapack_int ldx12,\n                           float* x21, lapack_int ldx21, float* x22,\n                           lapack_int ldx22, float* theta, float* phi,\n                           float* taup1, float* taup2, float* tauq1,\n                           float* tauq2 );\nlapack_int LAPACKE_sorbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                float* x11, lapack_int ldx11, float* x12,\n                                lapack_int ldx12, float* x21, lapack_int ldx21,\n                                float* x22, lapack_int ldx22, float* theta,\n                                float* phi, float* taup1, float* taup2,\n                                float* tauq1, float* tauq2, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_sorcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q, float* x11,\n                           lapack_int ldx11, float* x12, lapack_int ldx12,\n                           float* x21, lapack_int ldx21, float* x22,\n                           lapack_int ldx22, float* theta, float* u1,\n                           lapack_int ldu1, float* u2, lapack_int ldu2,\n                           float* v1t, lapack_int ldv1t, float* v2t,\n                           lapack_int ldv2t );\nlapack_int LAPACKE_sorcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, float* x11, lapack_int ldx11,\n                                float* x12, lapack_int ldx12, float* 
x21,\n                                lapack_int ldx21, float* x22, lapack_int ldx22,\n                                float* theta, float* u1, lapack_int ldu1,\n                                float* u2, lapack_int ldu2, float* v1t,\n                                lapack_int ldv1t, float* v2t, lapack_int ldv2t,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ssyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            float* a, lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_ssyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, float* a, lapack_int lda,\n                                 const lapack_int* ipiv, float* work );\nlapack_int LAPACKE_ssyswapr( int matrix_order, char uplo, lapack_int n,\n                             float* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_ssyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  float* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_ssytri2( int matrix_order, char uplo, lapack_int n, float* a,\n                            lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_ssytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 float* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_ssytri2x( int matrix_order, char uplo, lapack_int n,\n                             float* a, lapack_int lda, const lapack_int* ipiv,\n                             lapack_int nb );\nlapack_int LAPACKE_ssytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  float* a, lapack_int lda,\n                                  const lapack_int* ipiv, float* work,\n                                  
lapack_int nb );\nlapack_int LAPACKE_ssytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const float* a, lapack_int lda,\n                            const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_ssytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 float* b, lapack_int ldb, float* work );\nlapack_int LAPACKE_zbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, double* theta,\n                           double* phi, lapack_complex_double* u1,\n                           lapack_int ldu1, lapack_complex_double* u2,\n                           lapack_int ldu2, lapack_complex_double* v1t,\n                           lapack_int ldv1t, lapack_complex_double* v2t,\n                           lapack_int ldv2t, double* b11d, double* b11e,\n                           double* b12d, double* b12e, double* b21d,\n                           double* b21e, double* b22d, double* b22e );\nlapack_int LAPACKE_zbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                double* theta, double* phi,\n                                lapack_complex_double* u1, lapack_int ldu1,\n                                lapack_complex_double* u2, lapack_int ldu2,\n                                lapack_complex_double* v1t, lapack_int ldv1t,\n                                lapack_complex_double* v2t, lapack_int ldv2t,\n                                double* b11d, double* b11e, double* b12d,\n                                double* b12e, double* b21d, 
double* b21e,\n                                double* b22d, double* b22e, double* rwork,\n                                lapack_int lrwork );\nlapack_int LAPACKE_zheswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_zheswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_zhetri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_zhetri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_zhetri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_zhetri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_double* work, lapack_int nb );\nlapack_int LAPACKE_zhetrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_double* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_double* b, lapack_int ldb );\nlapack_int LAPACKE_zhetrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_double* a,\n   
                              lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* work );\nlapack_int LAPACKE_zsyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_zsyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, lapack_complex_double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_double* work );\nlapack_int LAPACKE_zsyswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_zsyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_zsytri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_zsytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_zsytri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_zsytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int lda,\n                                  
const lapack_int* ipiv,\n                                  lapack_complex_double* work, lapack_int nb );\nlapack_int LAPACKE_zsytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_double* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_double* b, lapack_int ldb );\nlapack_int LAPACKE_zsytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* work );\nlapack_int LAPACKE_zunbdb( int matrix_order, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_double* x11, lapack_int ldx11,\n                           lapack_complex_double* x12, lapack_int ldx12,\n                           lapack_complex_double* x21, lapack_int ldx21,\n                           lapack_complex_double* x22, lapack_int ldx22,\n                           double* theta, double* phi,\n                           lapack_complex_double* taup1,\n                           lapack_complex_double* taup2,\n                           lapack_complex_double* tauq1,\n                           lapack_complex_double* tauq2 );\nlapack_int LAPACKE_zunbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                lapack_complex_double* x11, lapack_int ldx11,\n                                lapack_complex_double* x12, lapack_int ldx12,\n                                lapack_complex_double* x21, lapack_int ldx21,\n                                lapack_complex_double* x22, lapack_int ldx22,\n                                
double* theta, double* phi,\n                                lapack_complex_double* taup1,\n                                lapack_complex_double* taup2,\n                                lapack_complex_double* tauq1,\n                                lapack_complex_double* tauq2,\n                                lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_zuncsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_double* x11, lapack_int ldx11,\n                           lapack_complex_double* x12, lapack_int ldx12,\n                           lapack_complex_double* x21, lapack_int ldx21,\n                           lapack_complex_double* x22, lapack_int ldx22,\n                           double* theta, lapack_complex_double* u1,\n                           lapack_int ldu1, lapack_complex_double* u2,\n                           lapack_int ldu2, lapack_complex_double* v1t,\n                           lapack_int ldv1t, lapack_complex_double* v2t,\n                           lapack_int ldv2t );\nlapack_int LAPACKE_zuncsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, lapack_complex_double* x11,\n                                lapack_int ldx11, lapack_complex_double* x12,\n                                lapack_int ldx12, lapack_complex_double* x21,\n                                lapack_int ldx21, lapack_complex_double* x22,\n                                lapack_int ldx22, double* theta,\n                                lapack_complex_double* u1, lapack_int ldu1,\n                                lapack_complex_double* u2, lapack_int ldu2,\n                                
lapack_complex_double* v1t, lapack_int ldv1t,\n                                lapack_complex_double* v2t, lapack_int ldv2t,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork );\n//LAPACK 3.4.0\nlapack_int LAPACKE_sgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const float* v, lapack_int ldv,\n                            const float* t, lapack_int ldt, float* c,\n                            lapack_int ldc );\nlapack_int LAPACKE_dgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const double* v, lapack_int ldv,\n                            const double* t, lapack_int ldt, double* c,\n                            lapack_int ldc );\nlapack_int LAPACKE_cgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const lapack_complex_float* v,\n                            lapack_int ldv, const lapack_complex_float* t,\n                            lapack_int ldt, lapack_complex_float* c,\n                            lapack_int ldc );\nlapack_int LAPACKE_zgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const lapack_complex_double* v,\n                            lapack_int ldv, const lapack_complex_double* t,\n                            lapack_int ldt, lapack_complex_double* c,\n                            lapack_int ldc );\n\nlapack_int LAPACKE_sgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, float* a, lapack_int lda, float* t,\n        
                   lapack_int ldt );\nlapack_int LAPACKE_dgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, double* a, lapack_int lda, double* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_cgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_zgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* t,\n                           lapack_int ldt );\n\nlapack_int LAPACKE_sgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_dgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_cgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_sgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_dgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_cgeqrt3( int matrix_order, 
lapack_int m, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb, const float* v,\n                            lapack_int ldv, const float* t, lapack_int ldt,\n                            float* a, lapack_int lda, float* b,\n                            lapack_int ldb );\nlapack_int LAPACKE_dtpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb, const double* v,\n                            lapack_int ldv, const double* t, lapack_int ldt,\n                            double* a, lapack_int lda, double* b,\n                            lapack_int ldb );\nlapack_int LAPACKE_ctpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb,\n                            const lapack_complex_float* v, lapack_int ldv,\n                            const lapack_complex_float* t, lapack_int ldt,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb,\n                            const lapack_complex_double* v, lapack_int ldv,\n                           
 const lapack_complex_double* t, lapack_int ldt,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_dtpqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int l, lapack_int nb, double* a,\n                           lapack_int lda, double* b, lapack_int ldb, double* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_ctpqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int l, lapack_int nb,\n                           lapack_complex_float* a, lapack_int lda, \n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_ztpqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int l, lapack_int nb,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stpqrt2( int matrix_order,\n                            lapack_int m, lapack_int n, lapack_int l,\n                            float* a, lapack_int lda,\n                            float* b, lapack_int ldb,\n                            float* t, lapack_int ldt );\nlapack_int LAPACKE_dtpqrt2( int matrix_order,\n                            lapack_int m, lapack_int n, lapack_int l,\n                            double* a, lapack_int lda,\n                            double* b, lapack_int ldb,\n                            double* t, lapack_int ldt );\nlapack_int LAPACKE_ctpqrt2( int matrix_order, \n                            lapack_int m, lapack_int n, lapack_int l,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* b, lapack_int ldb,\n                
            lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_ztpqrt2( int matrix_order,\n                            lapack_int m, lapack_int n, lapack_int l,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l, const float* v,\n                           lapack_int ldv, const float* t, lapack_int ldt,\n                           float* a, lapack_int lda, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l, const double* v,\n                           lapack_int ldv, const double* t, lapack_int ldt,\n                           double* a, lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l,\n                           const lapack_complex_float* v, lapack_int ldv,\n                           const lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l,\n                           const lapack_complex_double* v, lapack_int ldv,\n                           const lapack_complex_double* t, 
lapack_int ldt,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const float* v, lapack_int ldv,\n                                 const float* t, lapack_int ldt, float* c,\n                                 lapack_int ldc, float* work );\nlapack_int LAPACKE_dgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const double* v, lapack_int ldv,\n                                 const double* t, lapack_int ldt, double* c,\n                                 lapack_int ldc, double* work );\nlapack_int LAPACKE_cgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const lapack_complex_float* v,\n                                 lapack_int ldv, const lapack_complex_float* t,\n                                 lapack_int ldt, lapack_complex_float* c,\n                                 lapack_int ldc, lapack_complex_float* work );\nlapack_int LAPACKE_zgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const lapack_complex_double* v,\n                                 lapack_int ldv, const lapack_complex_double* t,\n                                 lapack_int ldt, lapack_complex_double* c,\n                                 lapack_int ldc, lapack_complex_double* work );\n\nlapack_int LAPACKE_sgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, float* a, lapack_int 
lda,\n                                float* t, lapack_int ldt, float* work );\nlapack_int LAPACKE_dgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, double* a, lapack_int lda,\n                                double* t, lapack_int ldt, double* work );\nlapack_int LAPACKE_cgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* t,\n                                lapack_int ldt, lapack_complex_float* work );\nlapack_int LAPACKE_zgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* t,\n                                lapack_int ldt, lapack_complex_double* work );\n\nlapack_int LAPACKE_sgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_dgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 double* a, lapack_int lda, double* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_cgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_sgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* t,\n                                 lapack_int ldt 
);\nlapack_int LAPACKE_dgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 double* a, lapack_int lda, double* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_cgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb, const float* v,\n                                 lapack_int ldv, const float* t, lapack_int ldt,\n                                 float* a, lapack_int lda, float* b,\n                                 lapack_int ldb, float* work );\nlapack_int LAPACKE_dtpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb, const double* v,\n                                 lapack_int ldv, const double* t,\n                                 lapack_int ldt, double* a, lapack_int lda,\n                                 double* b, lapack_int ldb, double* work );\nlapack_int LAPACKE_ctpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb,\n                                 const lapack_complex_float* v, lapack_int ldv,\n                                 const lapack_complex_float* t, lapack_int ldt,\n                                 
lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_ztpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb,\n                                 const lapack_complex_double* v, lapack_int ldv,\n                                 const lapack_complex_double* t, lapack_int ldt,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* work );\n\nlapack_int LAPACKE_dtpqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int l, lapack_int nb, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* t, lapack_int ldt, double* work );\nlapack_int LAPACKE_ctpqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int l, lapack_int nb,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_ztpqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int l, lapack_int nb,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_stpqrt2_work( int matrix_order,\n        
                         lapack_int m, lapack_int n, lapack_int l,\n                                 float* a, lapack_int lda,\n                                 float* b, lapack_int ldb,\n                                 float* t, lapack_int ldt );\nlapack_int LAPACKE_dtpqrt2_work( int matrix_order,\n                                 lapack_int m, lapack_int n, lapack_int l,\n                                 double* a, lapack_int lda,\n                                 double* b, lapack_int ldb,\n                                 double* t, lapack_int ldt );\nlapack_int LAPACKE_ctpqrt2_work( int matrix_order,\n                                 lapack_int m, lapack_int n, lapack_int l,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_ztpqrt2_work( int matrix_order,\n                                 lapack_int m, lapack_int n, lapack_int l,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const float* v, lapack_int ldv, const float* t,\n                                lapack_int ldt, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, const float* work,\n                                lapack_int ldwork );\nlapack_int LAPACKE_dtprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, 
lapack_int l,\n                                const double* v, lapack_int ldv,\n                                const double* t, lapack_int ldt, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                const double* work, lapack_int ldwork );\nlapack_int LAPACKE_ctprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const lapack_complex_float* v, lapack_int ldv,\n                                const lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                const float* work, lapack_int ldwork );\nlapack_int LAPACKE_ztprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const lapack_complex_double* v, lapack_int ldv,\n                                const lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                const double* work, lapack_int ldwork );\n//LAPACK 3.X.X\nlapack_int LAPACKE_ssysv_rook( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* a, lapack_int lda,\n                               lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dsysv_rook( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* a, lapack_int lda,\n                               lapack_int* ipiv, double* b, lapack_int ldb 
);\nlapack_int LAPACKE_csysv_rook( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zsysv_rook( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_double* b, lapack_int ldb );\nlapack_int LAPACKE_csyr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float alpha,\n                             const lapack_complex_float* x, lapack_int incx,\n                             lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zsyr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double alpha,\n                             const lapack_complex_double* x, lapack_int incx,\n                             lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_ssysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, float* a, lapack_int lda,\n                                    lapack_int* ipiv, float* b, lapack_int ldb,\n                                    float* work, lapack_int lwork );\nlapack_int LAPACKE_dsysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, double* a, lapack_int lda,\n                                    lapack_int* ipiv, double* b, lapack_int ldb,\n                                    double* work, lapack_int lwork );\nlapack_int LAPACKE_csysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, lapack_complex_float* a,\n                                    lapack_int lda, lapack_int* 
ipiv,\n                                    lapack_complex_float* b, lapack_int ldb,\n                                    lapack_complex_float* work,\n                                    lapack_int lwork );\nlapack_int LAPACKE_zsysv_rook_work( int matrix_order, char uplo, lapack_int n,\n                                    lapack_int nrhs, lapack_complex_double* a,\n                                    lapack_int lda, lapack_int* ipiv,\n                                    lapack_complex_double* b, lapack_int ldb,\n                                    lapack_complex_double* work,\n                                    lapack_int lwork );\nlapack_int LAPACKE_csyr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float alpha,\n                                  const lapack_complex_float* x,\n                                  lapack_int incx, lapack_complex_float* a,\n                                  lapack_int lda );\nlapack_int LAPACKE_zsyr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double alpha,\n                                  const lapack_complex_double* x,\n                                  lapack_int incx, lapack_complex_double* a,\n                                  lapack_int lda );\nvoid LAPACKE_ilaver( const lapack_int* vers_major,\n                     const lapack_int* vers_minor,\n                     const lapack_int* vers_patch );\n\n\n#define LAPACK_sgetrf LAPACK_GLOBAL(sgetrf,SGETRF)\n#define LAPACK_dgetrf LAPACK_GLOBAL(dgetrf,DGETRF)\n#define LAPACK_cgetrf LAPACK_GLOBAL(cgetrf,CGETRF)\n#define LAPACK_zgetrf LAPACK_GLOBAL(zgetrf,ZGETRF)\n#define LAPACK_sgbtrf LAPACK_GLOBAL(sgbtrf,SGBTRF)\n#define LAPACK_dgbtrf LAPACK_GLOBAL(dgbtrf,DGBTRF)\n#define LAPACK_cgbtrf LAPACK_GLOBAL(cgbtrf,CGBTRF)\n#define LAPACK_zgbtrf LAPACK_GLOBAL(zgbtrf,ZGBTRF)\n#define LAPACK_sgttrf LAPACK_GLOBAL(sgttrf,SGTTRF)\n#define LAPACK_dgttrf LAPACK_GLOBAL(dgttrf,DGTTRF)\n#define 
LAPACK_cgttrf LAPACK_GLOBAL(cgttrf,CGTTRF)\n#define LAPACK_zgttrf LAPACK_GLOBAL(zgttrf,ZGTTRF)\n#define LAPACK_spotrf LAPACK_GLOBAL(spotrf,SPOTRF)\n#define LAPACK_dpotrf LAPACK_GLOBAL(dpotrf,DPOTRF)\n#define LAPACK_cpotrf LAPACK_GLOBAL(cpotrf,CPOTRF)\n#define LAPACK_zpotrf LAPACK_GLOBAL(zpotrf,ZPOTRF)\n#define LAPACK_dpstrf LAPACK_GLOBAL(dpstrf,DPSTRF)\n#define LAPACK_spstrf LAPACK_GLOBAL(spstrf,SPSTRF)\n#define LAPACK_zpstrf LAPACK_GLOBAL(zpstrf,ZPSTRF)\n#define LAPACK_cpstrf LAPACK_GLOBAL(cpstrf,CPSTRF)\n#define LAPACK_dpftrf LAPACK_GLOBAL(dpftrf,DPFTRF)\n#define LAPACK_spftrf LAPACK_GLOBAL(spftrf,SPFTRF)\n#define LAPACK_zpftrf LAPACK_GLOBAL(zpftrf,ZPFTRF)\n#define LAPACK_cpftrf LAPACK_GLOBAL(cpftrf,CPFTRF)\n#define LAPACK_spptrf LAPACK_GLOBAL(spptrf,SPPTRF)\n#define LAPACK_dpptrf LAPACK_GLOBAL(dpptrf,DPPTRF)\n#define LAPACK_cpptrf LAPACK_GLOBAL(cpptrf,CPPTRF)\n#define LAPACK_zpptrf LAPACK_GLOBAL(zpptrf,ZPPTRF)\n#define LAPACK_spbtrf LAPACK_GLOBAL(spbtrf,SPBTRF)\n#define LAPACK_dpbtrf LAPACK_GLOBAL(dpbtrf,DPBTRF)\n#define LAPACK_cpbtrf LAPACK_GLOBAL(cpbtrf,CPBTRF)\n#define LAPACK_zpbtrf LAPACK_GLOBAL(zpbtrf,ZPBTRF)\n#define LAPACK_spttrf LAPACK_GLOBAL(spttrf,SPTTRF)\n#define LAPACK_dpttrf LAPACK_GLOBAL(dpttrf,DPTTRF)\n#define LAPACK_cpttrf LAPACK_GLOBAL(cpttrf,CPTTRF)\n#define LAPACK_zpttrf LAPACK_GLOBAL(zpttrf,ZPTTRF)\n#define LAPACK_ssytrf LAPACK_GLOBAL(ssytrf,SSYTRF)\n#define LAPACK_dsytrf LAPACK_GLOBAL(dsytrf,DSYTRF)\n#define LAPACK_csytrf LAPACK_GLOBAL(csytrf,CSYTRF)\n#define LAPACK_zsytrf LAPACK_GLOBAL(zsytrf,ZSYTRF)\n#define LAPACK_chetrf LAPACK_GLOBAL(chetrf,CHETRF)\n#define LAPACK_zhetrf LAPACK_GLOBAL(zhetrf,ZHETRF)\n#define LAPACK_ssptrf LAPACK_GLOBAL(ssptrf,SSPTRF)\n#define LAPACK_dsptrf LAPACK_GLOBAL(dsptrf,DSPTRF)\n#define LAPACK_csptrf LAPACK_GLOBAL(csptrf,CSPTRF)\n#define LAPACK_zsptrf LAPACK_GLOBAL(zsptrf,ZSPTRF)\n#define LAPACK_chptrf LAPACK_GLOBAL(chptrf,CHPTRF)\n#define LAPACK_zhptrf LAPACK_GLOBAL(zhptrf,ZHPTRF)\n#define LAPACK_sgetrs 
LAPACK_GLOBAL(sgetrs,SGETRS)\n#define LAPACK_dgetrs LAPACK_GLOBAL(dgetrs,DGETRS)\n#define LAPACK_cgetrs LAPACK_GLOBAL(cgetrs,CGETRS)\n#define LAPACK_zgetrs LAPACK_GLOBAL(zgetrs,ZGETRS)\n#define LAPACK_sgbtrs LAPACK_GLOBAL(sgbtrs,SGBTRS)\n#define LAPACK_dgbtrs LAPACK_GLOBAL(dgbtrs,DGBTRS)\n#define LAPACK_cgbtrs LAPACK_GLOBAL(cgbtrs,CGBTRS)\n#define LAPACK_zgbtrs LAPACK_GLOBAL(zgbtrs,ZGBTRS)\n#define LAPACK_sgttrs LAPACK_GLOBAL(sgttrs,SGTTRS)\n#define LAPACK_dgttrs LAPACK_GLOBAL(dgttrs,DGTTRS)\n#define LAPACK_cgttrs LAPACK_GLOBAL(cgttrs,CGTTRS)\n#define LAPACK_zgttrs LAPACK_GLOBAL(zgttrs,ZGTTRS)\n#define LAPACK_spotrs LAPACK_GLOBAL(spotrs,SPOTRS)\n#define LAPACK_dpotrs LAPACK_GLOBAL(dpotrs,DPOTRS)\n#define LAPACK_cpotrs LAPACK_GLOBAL(cpotrs,CPOTRS)\n#define LAPACK_zpotrs LAPACK_GLOBAL(zpotrs,ZPOTRS)\n#define LAPACK_dpftrs LAPACK_GLOBAL(dpftrs,DPFTRS)\n#define LAPACK_spftrs LAPACK_GLOBAL(spftrs,SPFTRS)\n#define LAPACK_zpftrs LAPACK_GLOBAL(zpftrs,ZPFTRS)\n#define LAPACK_cpftrs LAPACK_GLOBAL(cpftrs,CPFTRS)\n#define LAPACK_spptrs LAPACK_GLOBAL(spptrs,SPPTRS)\n#define LAPACK_dpptrs LAPACK_GLOBAL(dpptrs,DPPTRS)\n#define LAPACK_cpptrs LAPACK_GLOBAL(cpptrs,CPPTRS)\n#define LAPACK_zpptrs LAPACK_GLOBAL(zpptrs,ZPPTRS)\n#define LAPACK_spbtrs LAPACK_GLOBAL(spbtrs,SPBTRS)\n#define LAPACK_dpbtrs LAPACK_GLOBAL(dpbtrs,DPBTRS)\n#define LAPACK_cpbtrs LAPACK_GLOBAL(cpbtrs,CPBTRS)\n#define LAPACK_zpbtrs LAPACK_GLOBAL(zpbtrs,ZPBTRS)\n#define LAPACK_spttrs LAPACK_GLOBAL(spttrs,SPTTRS)\n#define LAPACK_dpttrs LAPACK_GLOBAL(dpttrs,DPTTRS)\n#define LAPACK_cpttrs LAPACK_GLOBAL(cpttrs,CPTTRS)\n#define LAPACK_zpttrs LAPACK_GLOBAL(zpttrs,ZPTTRS)\n#define LAPACK_ssytrs LAPACK_GLOBAL(ssytrs,SSYTRS)\n#define LAPACK_dsytrs LAPACK_GLOBAL(dsytrs,DSYTRS)\n#define LAPACK_csytrs LAPACK_GLOBAL(csytrs,CSYTRS)\n#define LAPACK_zsytrs LAPACK_GLOBAL(zsytrs,ZSYTRS)\n#define LAPACK_chetrs LAPACK_GLOBAL(chetrs,CHETRS)\n#define LAPACK_zhetrs LAPACK_GLOBAL(zhetrs,ZHETRS)\n#define LAPACK_ssptrs 
LAPACK_GLOBAL(ssptrs,SSPTRS)\n#define LAPACK_dsptrs LAPACK_GLOBAL(dsptrs,DSPTRS)\n#define LAPACK_csptrs LAPACK_GLOBAL(csptrs,CSPTRS)\n#define LAPACK_zsptrs LAPACK_GLOBAL(zsptrs,ZSPTRS)\n#define LAPACK_chptrs LAPACK_GLOBAL(chptrs,CHPTRS)\n#define LAPACK_zhptrs LAPACK_GLOBAL(zhptrs,ZHPTRS)\n#define LAPACK_strtrs LAPACK_GLOBAL(strtrs,STRTRS)\n#define LAPACK_dtrtrs LAPACK_GLOBAL(dtrtrs,DTRTRS)\n#define LAPACK_ctrtrs LAPACK_GLOBAL(ctrtrs,CTRTRS)\n#define LAPACK_ztrtrs LAPACK_GLOBAL(ztrtrs,ZTRTRS)\n#define LAPACK_stptrs LAPACK_GLOBAL(stptrs,STPTRS)\n#define LAPACK_dtptrs LAPACK_GLOBAL(dtptrs,DTPTRS)\n#define LAPACK_ctptrs LAPACK_GLOBAL(ctptrs,CTPTRS)\n#define LAPACK_ztptrs LAPACK_GLOBAL(ztptrs,ZTPTRS)\n#define LAPACK_stbtrs LAPACK_GLOBAL(stbtrs,STBTRS)\n#define LAPACK_dtbtrs LAPACK_GLOBAL(dtbtrs,DTBTRS)\n#define LAPACK_ctbtrs LAPACK_GLOBAL(ctbtrs,CTBTRS)\n#define LAPACK_ztbtrs LAPACK_GLOBAL(ztbtrs,ZTBTRS)\n#define LAPACK_sgecon LAPACK_GLOBAL(sgecon,SGECON)\n#define LAPACK_dgecon LAPACK_GLOBAL(dgecon,DGECON)\n#define LAPACK_cgecon LAPACK_GLOBAL(cgecon,CGECON)\n#define LAPACK_zgecon LAPACK_GLOBAL(zgecon,ZGECON)\n#define LAPACK_sgbcon LAPACK_GLOBAL(sgbcon,SGBCON)\n#define LAPACK_dgbcon LAPACK_GLOBAL(dgbcon,DGBCON)\n#define LAPACK_cgbcon LAPACK_GLOBAL(cgbcon,CGBCON)\n#define LAPACK_zgbcon LAPACK_GLOBAL(zgbcon,ZGBCON)\n#define LAPACK_sgtcon LAPACK_GLOBAL(sgtcon,SGTCON)\n#define LAPACK_dgtcon LAPACK_GLOBAL(dgtcon,DGTCON)\n#define LAPACK_cgtcon LAPACK_GLOBAL(cgtcon,CGTCON)\n#define LAPACK_zgtcon LAPACK_GLOBAL(zgtcon,ZGTCON)\n#define LAPACK_spocon LAPACK_GLOBAL(spocon,SPOCON)\n#define LAPACK_dpocon LAPACK_GLOBAL(dpocon,DPOCON)\n#define LAPACK_cpocon LAPACK_GLOBAL(cpocon,CPOCON)\n#define LAPACK_zpocon LAPACK_GLOBAL(zpocon,ZPOCON)\n#define LAPACK_sppcon LAPACK_GLOBAL(sppcon,SPPCON)\n#define LAPACK_dppcon LAPACK_GLOBAL(dppcon,DPPCON)\n#define LAPACK_cppcon LAPACK_GLOBAL(cppcon,CPPCON)\n#define LAPACK_zppcon LAPACK_GLOBAL(zppcon,ZPPCON)\n#define LAPACK_spbcon 
LAPACK_GLOBAL(spbcon,SPBCON)\n#define LAPACK_dpbcon LAPACK_GLOBAL(dpbcon,DPBCON)\n#define LAPACK_cpbcon LAPACK_GLOBAL(cpbcon,CPBCON)\n#define LAPACK_zpbcon LAPACK_GLOBAL(zpbcon,ZPBCON)\n#define LAPACK_sptcon LAPACK_GLOBAL(sptcon,SPTCON)\n#define LAPACK_dptcon LAPACK_GLOBAL(dptcon,DPTCON)\n#define LAPACK_cptcon LAPACK_GLOBAL(cptcon,CPTCON)\n#define LAPACK_zptcon LAPACK_GLOBAL(zptcon,ZPTCON)\n#define LAPACK_ssycon LAPACK_GLOBAL(ssycon,SSYCON)\n#define LAPACK_dsycon LAPACK_GLOBAL(dsycon,DSYCON)\n#define LAPACK_csycon LAPACK_GLOBAL(csycon,CSYCON)\n#define LAPACK_zsycon LAPACK_GLOBAL(zsycon,ZSYCON)\n#define LAPACK_checon LAPACK_GLOBAL(checon,CHECON)\n#define LAPACK_zhecon LAPACK_GLOBAL(zhecon,ZHECON)\n#define LAPACK_sspcon LAPACK_GLOBAL(sspcon,SSPCON)\n#define LAPACK_dspcon LAPACK_GLOBAL(dspcon,DSPCON)\n#define LAPACK_cspcon LAPACK_GLOBAL(cspcon,CSPCON)\n#define LAPACK_zspcon LAPACK_GLOBAL(zspcon,ZSPCON)\n#define LAPACK_chpcon LAPACK_GLOBAL(chpcon,CHPCON)\n#define LAPACK_zhpcon LAPACK_GLOBAL(zhpcon,ZHPCON)\n#define LAPACK_strcon LAPACK_GLOBAL(strcon,STRCON)\n#define LAPACK_dtrcon LAPACK_GLOBAL(dtrcon,DTRCON)\n#define LAPACK_ctrcon LAPACK_GLOBAL(ctrcon,CTRCON)\n#define LAPACK_ztrcon LAPACK_GLOBAL(ztrcon,ZTRCON)\n#define LAPACK_stpcon LAPACK_GLOBAL(stpcon,STPCON)\n#define LAPACK_dtpcon LAPACK_GLOBAL(dtpcon,DTPCON)\n#define LAPACK_ctpcon LAPACK_GLOBAL(ctpcon,CTPCON)\n#define LAPACK_ztpcon LAPACK_GLOBAL(ztpcon,ZTPCON)\n#define LAPACK_stbcon LAPACK_GLOBAL(stbcon,STBCON)\n#define LAPACK_dtbcon LAPACK_GLOBAL(dtbcon,DTBCON)\n#define LAPACK_ctbcon LAPACK_GLOBAL(ctbcon,CTBCON)\n#define LAPACK_ztbcon LAPACK_GLOBAL(ztbcon,ZTBCON)\n#define LAPACK_sgerfs LAPACK_GLOBAL(sgerfs,SGERFS)\n#define LAPACK_dgerfs LAPACK_GLOBAL(dgerfs,DGERFS)\n#define LAPACK_cgerfs LAPACK_GLOBAL(cgerfs,CGERFS)\n#define LAPACK_zgerfs LAPACK_GLOBAL(zgerfs,ZGERFS)\n#define LAPACK_dgerfsx LAPACK_GLOBAL(dgerfsx,DGERFSX)\n#define LAPACK_sgerfsx LAPACK_GLOBAL(sgerfsx,SGERFSX)\n#define LAPACK_zgerfsx 
LAPACK_GLOBAL(zgerfsx,ZGERFSX)\n#define LAPACK_cgerfsx LAPACK_GLOBAL(cgerfsx,CGERFSX)\n#define LAPACK_sgbrfs LAPACK_GLOBAL(sgbrfs,SGBRFS)\n#define LAPACK_dgbrfs LAPACK_GLOBAL(dgbrfs,DGBRFS)\n#define LAPACK_cgbrfs LAPACK_GLOBAL(cgbrfs,CGBRFS)\n#define LAPACK_zgbrfs LAPACK_GLOBAL(zgbrfs,ZGBRFS)\n#define LAPACK_dgbrfsx LAPACK_GLOBAL(dgbrfsx,DGBRFSX)\n#define LAPACK_sgbrfsx LAPACK_GLOBAL(sgbrfsx,SGBRFSX)\n#define LAPACK_zgbrfsx LAPACK_GLOBAL(zgbrfsx,ZGBRFSX)\n#define LAPACK_cgbrfsx LAPACK_GLOBAL(cgbrfsx,CGBRFSX)\n#define LAPACK_sgtrfs LAPACK_GLOBAL(sgtrfs,SGTRFS)\n#define LAPACK_dgtrfs LAPACK_GLOBAL(dgtrfs,DGTRFS)\n#define LAPACK_cgtrfs LAPACK_GLOBAL(cgtrfs,CGTRFS)\n#define LAPACK_zgtrfs LAPACK_GLOBAL(zgtrfs,ZGTRFS)\n#define LAPACK_sporfs LAPACK_GLOBAL(sporfs,SPORFS)\n#define LAPACK_dporfs LAPACK_GLOBAL(dporfs,DPORFS)\n#define LAPACK_cporfs LAPACK_GLOBAL(cporfs,CPORFS)\n#define LAPACK_zporfs LAPACK_GLOBAL(zporfs,ZPORFS)\n#define LAPACK_dporfsx LAPACK_GLOBAL(dporfsx,DPORFSX)\n#define LAPACK_sporfsx LAPACK_GLOBAL(sporfsx,SPORFSX)\n#define LAPACK_zporfsx LAPACK_GLOBAL(zporfsx,ZPORFSX)\n#define LAPACK_cporfsx LAPACK_GLOBAL(cporfsx,CPORFSX)\n#define LAPACK_spprfs LAPACK_GLOBAL(spprfs,SPPRFS)\n#define LAPACK_dpprfs LAPACK_GLOBAL(dpprfs,DPPRFS)\n#define LAPACK_cpprfs LAPACK_GLOBAL(cpprfs,CPPRFS)\n#define LAPACK_zpprfs LAPACK_GLOBAL(zpprfs,ZPPRFS)\n#define LAPACK_spbrfs LAPACK_GLOBAL(spbrfs,SPBRFS)\n#define LAPACK_dpbrfs LAPACK_GLOBAL(dpbrfs,DPBRFS)\n#define LAPACK_cpbrfs LAPACK_GLOBAL(cpbrfs,CPBRFS)\n#define LAPACK_zpbrfs LAPACK_GLOBAL(zpbrfs,ZPBRFS)\n#define LAPACK_sptrfs LAPACK_GLOBAL(sptrfs,SPTRFS)\n#define LAPACK_dptrfs LAPACK_GLOBAL(dptrfs,DPTRFS)\n#define LAPACK_cptrfs LAPACK_GLOBAL(cptrfs,CPTRFS)\n#define LAPACK_zptrfs LAPACK_GLOBAL(zptrfs,ZPTRFS)\n#define LAPACK_ssyrfs LAPACK_GLOBAL(ssyrfs,SSYRFS)\n#define LAPACK_dsyrfs LAPACK_GLOBAL(dsyrfs,DSYRFS)\n#define LAPACK_csyrfs LAPACK_GLOBAL(csyrfs,CSYRFS)\n#define LAPACK_zsyrfs LAPACK_GLOBAL(zsyrfs,ZSYRFS)\n#define 
LAPACK_dsyrfsx LAPACK_GLOBAL(dsyrfsx,DSYRFSX)\n#define LAPACK_ssyrfsx LAPACK_GLOBAL(ssyrfsx,SSYRFSX)\n#define LAPACK_zsyrfsx LAPACK_GLOBAL(zsyrfsx,ZSYRFSX)\n#define LAPACK_csyrfsx LAPACK_GLOBAL(csyrfsx,CSYRFSX)\n#define LAPACK_cherfs LAPACK_GLOBAL(cherfs,CHERFS)\n#define LAPACK_zherfs LAPACK_GLOBAL(zherfs,ZHERFS)\n#define LAPACK_zherfsx LAPACK_GLOBAL(zherfsx,ZHERFSX)\n#define LAPACK_cherfsx LAPACK_GLOBAL(cherfsx,CHERFSX)\n#define LAPACK_ssprfs LAPACK_GLOBAL(ssprfs,SSPRFS)\n#define LAPACK_dsprfs LAPACK_GLOBAL(dsprfs,DSPRFS)\n#define LAPACK_csprfs LAPACK_GLOBAL(csprfs,CSPRFS)\n#define LAPACK_zsprfs LAPACK_GLOBAL(zsprfs,ZSPRFS)\n#define LAPACK_chprfs LAPACK_GLOBAL(chprfs,CHPRFS)\n#define LAPACK_zhprfs LAPACK_GLOBAL(zhprfs,ZHPRFS)\n#define LAPACK_strrfs LAPACK_GLOBAL(strrfs,STRRFS)\n#define LAPACK_dtrrfs LAPACK_GLOBAL(dtrrfs,DTRRFS)\n#define LAPACK_ctrrfs LAPACK_GLOBAL(ctrrfs,CTRRFS)\n#define LAPACK_ztrrfs LAPACK_GLOBAL(ztrrfs,ZTRRFS)\n#define LAPACK_stprfs LAPACK_GLOBAL(stprfs,STPRFS)\n#define LAPACK_dtprfs LAPACK_GLOBAL(dtprfs,DTPRFS)\n#define LAPACK_ctprfs LAPACK_GLOBAL(ctprfs,CTPRFS)\n#define LAPACK_ztprfs LAPACK_GLOBAL(ztprfs,ZTPRFS)\n#define LAPACK_stbrfs LAPACK_GLOBAL(stbrfs,STBRFS)\n#define LAPACK_dtbrfs LAPACK_GLOBAL(dtbrfs,DTBRFS)\n#define LAPACK_ctbrfs LAPACK_GLOBAL(ctbrfs,CTBRFS)\n#define LAPACK_ztbrfs LAPACK_GLOBAL(ztbrfs,ZTBRFS)\n#define LAPACK_sgetri LAPACK_GLOBAL(sgetri,SGETRI)\n#define LAPACK_dgetri LAPACK_GLOBAL(dgetri,DGETRI)\n#define LAPACK_cgetri LAPACK_GLOBAL(cgetri,CGETRI)\n#define LAPACK_zgetri LAPACK_GLOBAL(zgetri,ZGETRI)\n#define LAPACK_spotri LAPACK_GLOBAL(spotri,SPOTRI)\n#define LAPACK_dpotri LAPACK_GLOBAL(dpotri,DPOTRI)\n#define LAPACK_cpotri LAPACK_GLOBAL(cpotri,CPOTRI)\n#define LAPACK_zpotri LAPACK_GLOBAL(zpotri,ZPOTRI)\n#define LAPACK_dpftri LAPACK_GLOBAL(dpftri,DPFTRI)\n#define LAPACK_spftri LAPACK_GLOBAL(spftri,SPFTRI)\n#define LAPACK_zpftri LAPACK_GLOBAL(zpftri,ZPFTRI)\n#define LAPACK_cpftri LAPACK_GLOBAL(cpftri,CPFTRI)\n#define 
LAPACK_spptri LAPACK_GLOBAL(spptri,SPPTRI)\n#define LAPACK_dpptri LAPACK_GLOBAL(dpptri,DPPTRI)\n#define LAPACK_cpptri LAPACK_GLOBAL(cpptri,CPPTRI)\n#define LAPACK_zpptri LAPACK_GLOBAL(zpptri,ZPPTRI)\n#define LAPACK_ssytri LAPACK_GLOBAL(ssytri,SSYTRI)\n#define LAPACK_dsytri LAPACK_GLOBAL(dsytri,DSYTRI)\n#define LAPACK_csytri LAPACK_GLOBAL(csytri,CSYTRI)\n#define LAPACK_zsytri LAPACK_GLOBAL(zsytri,ZSYTRI)\n#define LAPACK_chetri LAPACK_GLOBAL(chetri,CHETRI)\n#define LAPACK_zhetri LAPACK_GLOBAL(zhetri,ZHETRI)\n#define LAPACK_ssptri LAPACK_GLOBAL(ssptri,SSPTRI)\n#define LAPACK_dsptri LAPACK_GLOBAL(dsptri,DSPTRI)\n#define LAPACK_csptri LAPACK_GLOBAL(csptri,CSPTRI)\n#define LAPACK_zsptri LAPACK_GLOBAL(zsptri,ZSPTRI)\n#define LAPACK_chptri LAPACK_GLOBAL(chptri,CHPTRI)\n#define LAPACK_zhptri LAPACK_GLOBAL(zhptri,ZHPTRI)\n#define LAPACK_strtri LAPACK_GLOBAL(strtri,STRTRI)\n#define LAPACK_dtrtri LAPACK_GLOBAL(dtrtri,DTRTRI)\n#define LAPACK_ctrtri LAPACK_GLOBAL(ctrtri,CTRTRI)\n#define LAPACK_ztrtri LAPACK_GLOBAL(ztrtri,ZTRTRI)\n#define LAPACK_dtftri LAPACK_GLOBAL(dtftri,DTFTRI)\n#define LAPACK_stftri LAPACK_GLOBAL(stftri,STFTRI)\n#define LAPACK_ztftri LAPACK_GLOBAL(ztftri,ZTFTRI)\n#define LAPACK_ctftri LAPACK_GLOBAL(ctftri,CTFTRI)\n#define LAPACK_stptri LAPACK_GLOBAL(stptri,STPTRI)\n#define LAPACK_dtptri LAPACK_GLOBAL(dtptri,DTPTRI)\n#define LAPACK_ctptri LAPACK_GLOBAL(ctptri,CTPTRI)\n#define LAPACK_ztptri LAPACK_GLOBAL(ztptri,ZTPTRI)\n#define LAPACK_sgeequ LAPACK_GLOBAL(sgeequ,SGEEQU)\n#define LAPACK_dgeequ LAPACK_GLOBAL(dgeequ,DGEEQU)\n#define LAPACK_cgeequ LAPACK_GLOBAL(cgeequ,CGEEQU)\n#define LAPACK_zgeequ LAPACK_GLOBAL(zgeequ,ZGEEQU)\n#define LAPACK_dgeequb LAPACK_GLOBAL(dgeequb,DGEEQUB)\n#define LAPACK_sgeequb LAPACK_GLOBAL(sgeequb,SGEEQUB)\n#define LAPACK_zgeequb LAPACK_GLOBAL(zgeequb,ZGEEQUB)\n#define LAPACK_cgeequb LAPACK_GLOBAL(cgeequb,CGEEQUB)\n#define LAPACK_sgbequ LAPACK_GLOBAL(sgbequ,SGBEQU)\n#define LAPACK_dgbequ LAPACK_GLOBAL(dgbequ,DGBEQU)\n#define 
LAPACK_cgbequ LAPACK_GLOBAL(cgbequ,CGBEQU)\n#define LAPACK_zgbequ LAPACK_GLOBAL(zgbequ,ZGBEQU)\n#define LAPACK_dgbequb LAPACK_GLOBAL(dgbequb,DGBEQUB)\n#define LAPACK_sgbequb LAPACK_GLOBAL(sgbequb,SGBEQUB)\n#define LAPACK_zgbequb LAPACK_GLOBAL(zgbequb,ZGBEQUB)\n#define LAPACK_cgbequb LAPACK_GLOBAL(cgbequb,CGBEQUB)\n#define LAPACK_spoequ LAPACK_GLOBAL(spoequ,SPOEQU)\n#define LAPACK_dpoequ LAPACK_GLOBAL(dpoequ,DPOEQU)\n#define LAPACK_cpoequ LAPACK_GLOBAL(cpoequ,CPOEQU)\n#define LAPACK_zpoequ LAPACK_GLOBAL(zpoequ,ZPOEQU)\n#define LAPACK_dpoequb LAPACK_GLOBAL(dpoequb,DPOEQUB)\n#define LAPACK_spoequb LAPACK_GLOBAL(spoequb,SPOEQUB)\n#define LAPACK_zpoequb LAPACK_GLOBAL(zpoequb,ZPOEQUB)\n#define LAPACK_cpoequb LAPACK_GLOBAL(cpoequb,CPOEQUB)\n#define LAPACK_sppequ LAPACK_GLOBAL(sppequ,SPPEQU)\n#define LAPACK_dppequ LAPACK_GLOBAL(dppequ,DPPEQU)\n#define LAPACK_cppequ LAPACK_GLOBAL(cppequ,CPPEQU)\n#define LAPACK_zppequ LAPACK_GLOBAL(zppequ,ZPPEQU)\n#define LAPACK_spbequ LAPACK_GLOBAL(spbequ,SPBEQU)\n#define LAPACK_dpbequ LAPACK_GLOBAL(dpbequ,DPBEQU)\n#define LAPACK_cpbequ LAPACK_GLOBAL(cpbequ,CPBEQU)\n#define LAPACK_zpbequ LAPACK_GLOBAL(zpbequ,ZPBEQU)\n#define LAPACK_dsyequb LAPACK_GLOBAL(dsyequb,DSYEQUB)\n#define LAPACK_ssyequb LAPACK_GLOBAL(ssyequb,SSYEQUB)\n#define LAPACK_zsyequb LAPACK_GLOBAL(zsyequb,ZSYEQUB)\n#define LAPACK_csyequb LAPACK_GLOBAL(csyequb,CSYEQUB)\n#define LAPACK_zheequb LAPACK_GLOBAL(zheequb,ZHEEQUB)\n#define LAPACK_cheequb LAPACK_GLOBAL(cheequb,CHEEQUB)\n#define LAPACK_sgesv LAPACK_GLOBAL(sgesv,SGESV)\n#define LAPACK_dgesv LAPACK_GLOBAL(dgesv,DGESV)\n#define LAPACK_cgesv LAPACK_GLOBAL(cgesv,CGESV)\n#define LAPACK_zgesv LAPACK_GLOBAL(zgesv,ZGESV)\n#define LAPACK_dsgesv LAPACK_GLOBAL(dsgesv,DSGESV)\n#define LAPACK_zcgesv LAPACK_GLOBAL(zcgesv,ZCGESV)\n#define LAPACK_sgesvx LAPACK_GLOBAL(sgesvx,SGESVX)\n#define LAPACK_dgesvx LAPACK_GLOBAL(dgesvx,DGESVX)\n#define LAPACK_cgesvx LAPACK_GLOBAL(cgesvx,CGESVX)\n#define LAPACK_zgesvx 
LAPACK_GLOBAL(zgesvx,ZGESVX)\n#define LAPACK_dgesvxx LAPACK_GLOBAL(dgesvxx,DGESVXX)\n#define LAPACK_sgesvxx LAPACK_GLOBAL(sgesvxx,SGESVXX)\n#define LAPACK_zgesvxx LAPACK_GLOBAL(zgesvxx,ZGESVXX)\n#define LAPACK_cgesvxx LAPACK_GLOBAL(cgesvxx,CGESVXX)\n#define LAPACK_sgbsv LAPACK_GLOBAL(sgbsv,SGBSV)\n#define LAPACK_dgbsv LAPACK_GLOBAL(dgbsv,DGBSV)\n#define LAPACK_cgbsv LAPACK_GLOBAL(cgbsv,CGBSV)\n#define LAPACK_zgbsv LAPACK_GLOBAL(zgbsv,ZGBSV)\n#define LAPACK_sgbsvx LAPACK_GLOBAL(sgbsvx,SGBSVX)\n#define LAPACK_dgbsvx LAPACK_GLOBAL(dgbsvx,DGBSVX)\n#define LAPACK_cgbsvx LAPACK_GLOBAL(cgbsvx,CGBSVX)\n#define LAPACK_zgbsvx LAPACK_GLOBAL(zgbsvx,ZGBSVX)\n#define LAPACK_dgbsvxx LAPACK_GLOBAL(dgbsvxx,DGBSVXX)\n#define LAPACK_sgbsvxx LAPACK_GLOBAL(sgbsvxx,SGBSVXX)\n#define LAPACK_zgbsvxx LAPACK_GLOBAL(zgbsvxx,ZGBSVXX)\n#define LAPACK_cgbsvxx LAPACK_GLOBAL(cgbsvxx,CGBSVXX)\n#define LAPACK_sgtsv LAPACK_GLOBAL(sgtsv,SGTSV)\n#define LAPACK_dgtsv LAPACK_GLOBAL(dgtsv,DGTSV)\n#define LAPACK_cgtsv LAPACK_GLOBAL(cgtsv,CGTSV)\n#define LAPACK_zgtsv LAPACK_GLOBAL(zgtsv,ZGTSV)\n#define LAPACK_sgtsvx LAPACK_GLOBAL(sgtsvx,SGTSVX)\n#define LAPACK_dgtsvx LAPACK_GLOBAL(dgtsvx,DGTSVX)\n#define LAPACK_cgtsvx LAPACK_GLOBAL(cgtsvx,CGTSVX)\n#define LAPACK_zgtsvx LAPACK_GLOBAL(zgtsvx,ZGTSVX)\n#define LAPACK_sposv LAPACK_GLOBAL(sposv,SPOSV)\n#define LAPACK_dposv LAPACK_GLOBAL(dposv,DPOSV)\n#define LAPACK_cposv LAPACK_GLOBAL(cposv,CPOSV)\n#define LAPACK_zposv LAPACK_GLOBAL(zposv,ZPOSV)\n#define LAPACK_dsposv LAPACK_GLOBAL(dsposv,DSPOSV)\n#define LAPACK_zcposv LAPACK_GLOBAL(zcposv,ZCPOSV)\n#define LAPACK_sposvx LAPACK_GLOBAL(sposvx,SPOSVX)\n#define LAPACK_dposvx LAPACK_GLOBAL(dposvx,DPOSVX)\n#define LAPACK_cposvx LAPACK_GLOBAL(cposvx,CPOSVX)\n#define LAPACK_zposvx LAPACK_GLOBAL(zposvx,ZPOSVX)\n#define LAPACK_dposvxx LAPACK_GLOBAL(dposvxx,DPOSVXX)\n#define LAPACK_sposvxx LAPACK_GLOBAL(sposvxx,SPOSVXX)\n#define LAPACK_zposvxx LAPACK_GLOBAL(zposvxx,ZPOSVXX)\n#define LAPACK_cposvxx 
LAPACK_GLOBAL(cposvxx,CPOSVXX)\n#define LAPACK_sppsv LAPACK_GLOBAL(sppsv,SPPSV)\n#define LAPACK_dppsv LAPACK_GLOBAL(dppsv,DPPSV)\n#define LAPACK_cppsv LAPACK_GLOBAL(cppsv,CPPSV)\n#define LAPACK_zppsv LAPACK_GLOBAL(zppsv,ZPPSV)\n#define LAPACK_sppsvx LAPACK_GLOBAL(sppsvx,SPPSVX)\n#define LAPACK_dppsvx LAPACK_GLOBAL(dppsvx,DPPSVX)\n#define LAPACK_cppsvx LAPACK_GLOBAL(cppsvx,CPPSVX)\n#define LAPACK_zppsvx LAPACK_GLOBAL(zppsvx,ZPPSVX)\n#define LAPACK_spbsv LAPACK_GLOBAL(spbsv,SPBSV)\n#define LAPACK_dpbsv LAPACK_GLOBAL(dpbsv,DPBSV)\n#define LAPACK_cpbsv LAPACK_GLOBAL(cpbsv,CPBSV)\n#define LAPACK_zpbsv LAPACK_GLOBAL(zpbsv,ZPBSV)\n#define LAPACK_spbsvx LAPACK_GLOBAL(spbsvx,SPBSVX)\n#define LAPACK_dpbsvx LAPACK_GLOBAL(dpbsvx,DPBSVX)\n#define LAPACK_cpbsvx LAPACK_GLOBAL(cpbsvx,CPBSVX)\n#define LAPACK_zpbsvx LAPACK_GLOBAL(zpbsvx,ZPBSVX)\n#define LAPACK_sptsv LAPACK_GLOBAL(sptsv,SPTSV)\n#define LAPACK_dptsv LAPACK_GLOBAL(dptsv,DPTSV)\n#define LAPACK_cptsv LAPACK_GLOBAL(cptsv,CPTSV)\n#define LAPACK_zptsv LAPACK_GLOBAL(zptsv,ZPTSV)\n#define LAPACK_sptsvx LAPACK_GLOBAL(sptsvx,SPTSVX)\n#define LAPACK_dptsvx LAPACK_GLOBAL(dptsvx,DPTSVX)\n#define LAPACK_cptsvx LAPACK_GLOBAL(cptsvx,CPTSVX)\n#define LAPACK_zptsvx LAPACK_GLOBAL(zptsvx,ZPTSVX)\n#define LAPACK_ssysv LAPACK_GLOBAL(ssysv,SSYSV)\n#define LAPACK_dsysv LAPACK_GLOBAL(dsysv,DSYSV)\n#define LAPACK_csysv LAPACK_GLOBAL(csysv,CSYSV)\n#define LAPACK_zsysv LAPACK_GLOBAL(zsysv,ZSYSV)\n#define LAPACK_ssysvx LAPACK_GLOBAL(ssysvx,SSYSVX)\n#define LAPACK_dsysvx LAPACK_GLOBAL(dsysvx,DSYSVX)\n#define LAPACK_csysvx LAPACK_GLOBAL(csysvx,CSYSVX)\n#define LAPACK_zsysvx LAPACK_GLOBAL(zsysvx,ZSYSVX)\n#define LAPACK_dsysvxx LAPACK_GLOBAL(dsysvxx,DSYSVXX)\n#define LAPACK_ssysvxx LAPACK_GLOBAL(ssysvxx,SSYSVXX)\n#define LAPACK_zsysvxx LAPACK_GLOBAL(zsysvxx,ZSYSVXX)\n#define LAPACK_csysvxx LAPACK_GLOBAL(csysvxx,CSYSVXX)\n#define LAPACK_chesv LAPACK_GLOBAL(chesv,CHESV)\n#define LAPACK_zhesv LAPACK_GLOBAL(zhesv,ZHESV)\n#define LAPACK_chesvx 
LAPACK_GLOBAL(chesvx,CHESVX)\n#define LAPACK_zhesvx LAPACK_GLOBAL(zhesvx,ZHESVX)\n#define LAPACK_zhesvxx LAPACK_GLOBAL(zhesvxx,ZHESVXX)\n#define LAPACK_chesvxx LAPACK_GLOBAL(chesvxx,CHESVXX)\n#define LAPACK_sspsv LAPACK_GLOBAL(sspsv,SSPSV)\n#define LAPACK_dspsv LAPACK_GLOBAL(dspsv,DSPSV)\n#define LAPACK_cspsv LAPACK_GLOBAL(cspsv,CSPSV)\n#define LAPACK_zspsv LAPACK_GLOBAL(zspsv,ZSPSV)\n#define LAPACK_sspsvx LAPACK_GLOBAL(sspsvx,SSPSVX)\n#define LAPACK_dspsvx LAPACK_GLOBAL(dspsvx,DSPSVX)\n#define LAPACK_cspsvx LAPACK_GLOBAL(cspsvx,CSPSVX)\n#define LAPACK_zspsvx LAPACK_GLOBAL(zspsvx,ZSPSVX)\n#define LAPACK_chpsv LAPACK_GLOBAL(chpsv,CHPSV)\n#define LAPACK_zhpsv LAPACK_GLOBAL(zhpsv,ZHPSV)\n#define LAPACK_chpsvx LAPACK_GLOBAL(chpsvx,CHPSVX)\n#define LAPACK_zhpsvx LAPACK_GLOBAL(zhpsvx,ZHPSVX)\n#define LAPACK_sgeqrf LAPACK_GLOBAL(sgeqrf,SGEQRF)\n#define LAPACK_dgeqrf LAPACK_GLOBAL(dgeqrf,DGEQRF)\n#define LAPACK_cgeqrf LAPACK_GLOBAL(cgeqrf,CGEQRF)\n#define LAPACK_zgeqrf LAPACK_GLOBAL(zgeqrf,ZGEQRF)\n#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF)\n#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF)\n#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF)\n#define LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF)\n#define LAPACK_sgeqp3 LAPACK_GLOBAL(sgeqp3,SGEQP3)\n#define LAPACK_dgeqp3 LAPACK_GLOBAL(dgeqp3,DGEQP3)\n#define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3)\n#define LAPACK_zgeqp3 LAPACK_GLOBAL(zgeqp3,ZGEQP3)\n#define LAPACK_sorgqr LAPACK_GLOBAL(sorgqr,SORGQR)\n#define LAPACK_dorgqr LAPACK_GLOBAL(dorgqr,DORGQR)\n#define LAPACK_sormqr LAPACK_GLOBAL(sormqr,SORMQR)\n#define LAPACK_dormqr LAPACK_GLOBAL(dormqr,DORMQR)\n#define LAPACK_cungqr LAPACK_GLOBAL(cungqr,CUNGQR)\n#define LAPACK_zungqr LAPACK_GLOBAL(zungqr,ZUNGQR)\n#define LAPACK_cunmqr LAPACK_GLOBAL(cunmqr,CUNMQR)\n#define LAPACK_zunmqr LAPACK_GLOBAL(zunmqr,ZUNMQR)\n#define LAPACK_sgelqf LAPACK_GLOBAL(sgelqf,SGELQF)\n#define LAPACK_dgelqf LAPACK_GLOBAL(dgelqf,DGELQF)\n#define LAPACK_cgelqf 
LAPACK_GLOBAL(cgelqf,CGELQF)\n#define LAPACK_zgelqf LAPACK_GLOBAL(zgelqf,ZGELQF)\n#define LAPACK_sorglq LAPACK_GLOBAL(sorglq,SORGLQ)\n#define LAPACK_dorglq LAPACK_GLOBAL(dorglq,DORGLQ)\n#define LAPACK_sormlq LAPACK_GLOBAL(sormlq,SORMLQ)\n#define LAPACK_dormlq LAPACK_GLOBAL(dormlq,DORMLQ)\n#define LAPACK_cunglq LAPACK_GLOBAL(cunglq,CUNGLQ)\n#define LAPACK_zunglq LAPACK_GLOBAL(zunglq,ZUNGLQ)\n#define LAPACK_cunmlq LAPACK_GLOBAL(cunmlq,CUNMLQ)\n#define LAPACK_zunmlq LAPACK_GLOBAL(zunmlq,ZUNMLQ)\n#define LAPACK_sgeqlf LAPACK_GLOBAL(sgeqlf,SGEQLF)\n#define LAPACK_dgeqlf LAPACK_GLOBAL(dgeqlf,DGEQLF)\n#define LAPACK_cgeqlf LAPACK_GLOBAL(cgeqlf,CGEQLF)\n#define LAPACK_zgeqlf LAPACK_GLOBAL(zgeqlf,ZGEQLF)\n#define LAPACK_sorgql LAPACK_GLOBAL(sorgql,SORGQL)\n#define LAPACK_dorgql LAPACK_GLOBAL(dorgql,DORGQL)\n#define LAPACK_cungql LAPACK_GLOBAL(cungql,CUNGQL)\n#define LAPACK_zungql LAPACK_GLOBAL(zungql,ZUNGQL)\n#define LAPACK_sormql LAPACK_GLOBAL(sormql,SORMQL)\n#define LAPACK_dormql LAPACK_GLOBAL(dormql,DORMQL)\n#define LAPACK_cunmql LAPACK_GLOBAL(cunmql,CUNMQL)\n#define LAPACK_zunmql LAPACK_GLOBAL(zunmql,ZUNMQL)\n#define LAPACK_sgerqf LAPACK_GLOBAL(sgerqf,SGERQF)\n#define LAPACK_dgerqf LAPACK_GLOBAL(dgerqf,DGERQF)\n#define LAPACK_cgerqf LAPACK_GLOBAL(cgerqf,CGERQF)\n#define LAPACK_zgerqf LAPACK_GLOBAL(zgerqf,ZGERQF)\n#define LAPACK_sorgrq LAPACK_GLOBAL(sorgrq,SORGRQ)\n#define LAPACK_dorgrq LAPACK_GLOBAL(dorgrq,DORGRQ)\n#define LAPACK_cungrq LAPACK_GLOBAL(cungrq,CUNGRQ)\n#define LAPACK_zungrq LAPACK_GLOBAL(zungrq,ZUNGRQ)\n#define LAPACK_sormrq LAPACK_GLOBAL(sormrq,SORMRQ)\n#define LAPACK_dormrq LAPACK_GLOBAL(dormrq,DORMRQ)\n#define LAPACK_cunmrq LAPACK_GLOBAL(cunmrq,CUNMRQ)\n#define LAPACK_zunmrq LAPACK_GLOBAL(zunmrq,ZUNMRQ)\n#define LAPACK_stzrzf LAPACK_GLOBAL(stzrzf,STZRZF)\n#define LAPACK_dtzrzf LAPACK_GLOBAL(dtzrzf,DTZRZF)\n#define LAPACK_ctzrzf LAPACK_GLOBAL(ctzrzf,CTZRZF)\n#define LAPACK_ztzrzf LAPACK_GLOBAL(ztzrzf,ZTZRZF)\n#define LAPACK_sormrz 
LAPACK_GLOBAL(sormrz,SORMRZ)\n#define LAPACK_dormrz LAPACK_GLOBAL(dormrz,DORMRZ)\n#define LAPACK_cunmrz LAPACK_GLOBAL(cunmrz,CUNMRZ)\n#define LAPACK_zunmrz LAPACK_GLOBAL(zunmrz,ZUNMRZ)\n#define LAPACK_sggqrf LAPACK_GLOBAL(sggqrf,SGGQRF)\n#define LAPACK_dggqrf LAPACK_GLOBAL(dggqrf,DGGQRF)\n#define LAPACK_cggqrf LAPACK_GLOBAL(cggqrf,CGGQRF)\n#define LAPACK_zggqrf LAPACK_GLOBAL(zggqrf,ZGGQRF)\n#define LAPACK_sggrqf LAPACK_GLOBAL(sggrqf,SGGRQF)\n#define LAPACK_dggrqf LAPACK_GLOBAL(dggrqf,DGGRQF)\n#define LAPACK_cggrqf LAPACK_GLOBAL(cggrqf,CGGRQF)\n#define LAPACK_zggrqf LAPACK_GLOBAL(zggrqf,ZGGRQF)\n#define LAPACK_sgebrd LAPACK_GLOBAL(sgebrd,SGEBRD)\n#define LAPACK_dgebrd LAPACK_GLOBAL(dgebrd,DGEBRD)\n#define LAPACK_cgebrd LAPACK_GLOBAL(cgebrd,CGEBRD)\n#define LAPACK_zgebrd LAPACK_GLOBAL(zgebrd,ZGEBRD)\n#define LAPACK_sgbbrd LAPACK_GLOBAL(sgbbrd,SGBBRD)\n#define LAPACK_dgbbrd LAPACK_GLOBAL(dgbbrd,DGBBRD)\n#define LAPACK_cgbbrd LAPACK_GLOBAL(cgbbrd,CGBBRD)\n#define LAPACK_zgbbrd LAPACK_GLOBAL(zgbbrd,ZGBBRD)\n#define LAPACK_sorgbr LAPACK_GLOBAL(sorgbr,SORGBR)\n#define LAPACK_dorgbr LAPACK_GLOBAL(dorgbr,DORGBR)\n#define LAPACK_sormbr LAPACK_GLOBAL(sormbr,SORMBR)\n#define LAPACK_dormbr LAPACK_GLOBAL(dormbr,DORMBR)\n#define LAPACK_cungbr LAPACK_GLOBAL(cungbr,CUNGBR)\n#define LAPACK_zungbr LAPACK_GLOBAL(zungbr,ZUNGBR)\n#define LAPACK_cunmbr LAPACK_GLOBAL(cunmbr,CUNMBR)\n#define LAPACK_zunmbr LAPACK_GLOBAL(zunmbr,ZUNMBR)\n#define LAPACK_sbdsqr LAPACK_GLOBAL(sbdsqr,SBDSQR)\n#define LAPACK_dbdsqr LAPACK_GLOBAL(dbdsqr,DBDSQR)\n#define LAPACK_cbdsqr LAPACK_GLOBAL(cbdsqr,CBDSQR)\n#define LAPACK_zbdsqr LAPACK_GLOBAL(zbdsqr,ZBDSQR)\n#define LAPACK_sbdsdc LAPACK_GLOBAL(sbdsdc,SBDSDC)\n#define LAPACK_dbdsdc LAPACK_GLOBAL(dbdsdc,DBDSDC)\n#define LAPACK_ssytrd LAPACK_GLOBAL(ssytrd,SSYTRD)\n#define LAPACK_dsytrd LAPACK_GLOBAL(dsytrd,DSYTRD)\n#define LAPACK_sorgtr LAPACK_GLOBAL(sorgtr,SORGTR)\n#define LAPACK_dorgtr LAPACK_GLOBAL(dorgtr,DORGTR)\n#define LAPACK_sormtr 
LAPACK_GLOBAL(sormtr,SORMTR)\n#define LAPACK_dormtr LAPACK_GLOBAL(dormtr,DORMTR)\n#define LAPACK_chetrd LAPACK_GLOBAL(chetrd,CHETRD)\n#define LAPACK_zhetrd LAPACK_GLOBAL(zhetrd,ZHETRD)\n#define LAPACK_cungtr LAPACK_GLOBAL(cungtr,CUNGTR)\n#define LAPACK_zungtr LAPACK_GLOBAL(zungtr,ZUNGTR)\n#define LAPACK_cunmtr LAPACK_GLOBAL(cunmtr,CUNMTR)\n#define LAPACK_zunmtr LAPACK_GLOBAL(zunmtr,ZUNMTR)\n#define LAPACK_ssptrd LAPACK_GLOBAL(ssptrd,SSPTRD)\n#define LAPACK_dsptrd LAPACK_GLOBAL(dsptrd,DSPTRD)\n#define LAPACK_sopgtr LAPACK_GLOBAL(sopgtr,SOPGTR)\n#define LAPACK_dopgtr LAPACK_GLOBAL(dopgtr,DOPGTR)\n#define LAPACK_sopmtr LAPACK_GLOBAL(sopmtr,SOPMTR)\n#define LAPACK_dopmtr LAPACK_GLOBAL(dopmtr,DOPMTR)\n#define LAPACK_chptrd LAPACK_GLOBAL(chptrd,CHPTRD)\n#define LAPACK_zhptrd LAPACK_GLOBAL(zhptrd,ZHPTRD)\n#define LAPACK_cupgtr LAPACK_GLOBAL(cupgtr,CUPGTR)\n#define LAPACK_zupgtr LAPACK_GLOBAL(zupgtr,ZUPGTR)\n#define LAPACK_cupmtr LAPACK_GLOBAL(cupmtr,CUPMTR)\n#define LAPACK_zupmtr LAPACK_GLOBAL(zupmtr,ZUPMTR)\n#define LAPACK_ssbtrd LAPACK_GLOBAL(ssbtrd,SSBTRD)\n#define LAPACK_dsbtrd LAPACK_GLOBAL(dsbtrd,DSBTRD)\n#define LAPACK_chbtrd LAPACK_GLOBAL(chbtrd,CHBTRD)\n#define LAPACK_zhbtrd LAPACK_GLOBAL(zhbtrd,ZHBTRD)\n#define LAPACK_ssterf LAPACK_GLOBAL(ssterf,SSTERF)\n#define LAPACK_dsterf LAPACK_GLOBAL(dsterf,DSTERF)\n#define LAPACK_ssteqr LAPACK_GLOBAL(ssteqr,SSTEQR)\n#define LAPACK_dsteqr LAPACK_GLOBAL(dsteqr,DSTEQR)\n#define LAPACK_csteqr LAPACK_GLOBAL(csteqr,CSTEQR)\n#define LAPACK_zsteqr LAPACK_GLOBAL(zsteqr,ZSTEQR)\n#define LAPACK_sstemr LAPACK_GLOBAL(sstemr,SSTEMR)\n#define LAPACK_dstemr LAPACK_GLOBAL(dstemr,DSTEMR)\n#define LAPACK_cstemr LAPACK_GLOBAL(cstemr,CSTEMR)\n#define LAPACK_zstemr LAPACK_GLOBAL(zstemr,ZSTEMR)\n#define LAPACK_sstedc LAPACK_GLOBAL(sstedc,SSTEDC)\n#define LAPACK_dstedc LAPACK_GLOBAL(dstedc,DSTEDC)\n#define LAPACK_cstedc LAPACK_GLOBAL(cstedc,CSTEDC)\n#define LAPACK_zstedc LAPACK_GLOBAL(zstedc,ZSTEDC)\n#define LAPACK_sstegr 
LAPACK_GLOBAL(sstegr,SSTEGR)\n#define LAPACK_dstegr LAPACK_GLOBAL(dstegr,DSTEGR)\n#define LAPACK_cstegr LAPACK_GLOBAL(cstegr,CSTEGR)\n#define LAPACK_zstegr LAPACK_GLOBAL(zstegr,ZSTEGR)\n#define LAPACK_spteqr LAPACK_GLOBAL(spteqr,SPTEQR)\n#define LAPACK_dpteqr LAPACK_GLOBAL(dpteqr,DPTEQR)\n#define LAPACK_cpteqr LAPACK_GLOBAL(cpteqr,CPTEQR)\n#define LAPACK_zpteqr LAPACK_GLOBAL(zpteqr,ZPTEQR)\n#define LAPACK_sstebz LAPACK_GLOBAL(sstebz,SSTEBZ)\n#define LAPACK_dstebz LAPACK_GLOBAL(dstebz,DSTEBZ)\n#define LAPACK_sstein LAPACK_GLOBAL(sstein,SSTEIN)\n#define LAPACK_dstein LAPACK_GLOBAL(dstein,DSTEIN)\n#define LAPACK_cstein LAPACK_GLOBAL(cstein,CSTEIN)\n#define LAPACK_zstein LAPACK_GLOBAL(zstein,ZSTEIN)\n#define LAPACK_sdisna LAPACK_GLOBAL(sdisna,SDISNA)\n#define LAPACK_ddisna LAPACK_GLOBAL(ddisna,DDISNA)\n#define LAPACK_ssygst LAPACK_GLOBAL(ssygst,SSYGST)\n#define LAPACK_dsygst LAPACK_GLOBAL(dsygst,DSYGST)\n#define LAPACK_chegst LAPACK_GLOBAL(chegst,CHEGST)\n#define LAPACK_zhegst LAPACK_GLOBAL(zhegst,ZHEGST)\n#define LAPACK_sspgst LAPACK_GLOBAL(sspgst,SSPGST)\n#define LAPACK_dspgst LAPACK_GLOBAL(dspgst,DSPGST)\n#define LAPACK_chpgst LAPACK_GLOBAL(chpgst,CHPGST)\n#define LAPACK_zhpgst LAPACK_GLOBAL(zhpgst,ZHPGST)\n#define LAPACK_ssbgst LAPACK_GLOBAL(ssbgst,SSBGST)\n#define LAPACK_dsbgst LAPACK_GLOBAL(dsbgst,DSBGST)\n#define LAPACK_chbgst LAPACK_GLOBAL(chbgst,CHBGST)\n#define LAPACK_zhbgst LAPACK_GLOBAL(zhbgst,ZHBGST)\n#define LAPACK_spbstf LAPACK_GLOBAL(spbstf,SPBSTF)\n#define LAPACK_dpbstf LAPACK_GLOBAL(dpbstf,DPBSTF)\n#define LAPACK_cpbstf LAPACK_GLOBAL(cpbstf,CPBSTF)\n#define LAPACK_zpbstf LAPACK_GLOBAL(zpbstf,ZPBSTF)\n#define LAPACK_sgehrd LAPACK_GLOBAL(sgehrd,SGEHRD)\n#define LAPACK_dgehrd LAPACK_GLOBAL(dgehrd,DGEHRD)\n#define LAPACK_cgehrd LAPACK_GLOBAL(cgehrd,CGEHRD)\n#define LAPACK_zgehrd LAPACK_GLOBAL(zgehrd,ZGEHRD)\n#define LAPACK_sorghr LAPACK_GLOBAL(sorghr,SORGHR)\n#define LAPACK_dorghr LAPACK_GLOBAL(dorghr,DORGHR)\n#define LAPACK_sormhr 
LAPACK_GLOBAL(sormhr,SORMHR)\n#define LAPACK_dormhr LAPACK_GLOBAL(dormhr,DORMHR)\n#define LAPACK_cunghr LAPACK_GLOBAL(cunghr,CUNGHR)\n#define LAPACK_zunghr LAPACK_GLOBAL(zunghr,ZUNGHR)\n#define LAPACK_cunmhr LAPACK_GLOBAL(cunmhr,CUNMHR)\n#define LAPACK_zunmhr LAPACK_GLOBAL(zunmhr,ZUNMHR)\n#define LAPACK_sgebal LAPACK_GLOBAL(sgebal,SGEBAL)\n#define LAPACK_dgebal LAPACK_GLOBAL(dgebal,DGEBAL)\n#define LAPACK_cgebal LAPACK_GLOBAL(cgebal,CGEBAL)\n#define LAPACK_zgebal LAPACK_GLOBAL(zgebal,ZGEBAL)\n#define LAPACK_sgebak LAPACK_GLOBAL(sgebak,SGEBAK)\n#define LAPACK_dgebak LAPACK_GLOBAL(dgebak,DGEBAK)\n#define LAPACK_cgebak LAPACK_GLOBAL(cgebak,CGEBAK)\n#define LAPACK_zgebak LAPACK_GLOBAL(zgebak,ZGEBAK)\n#define LAPACK_shseqr LAPACK_GLOBAL(shseqr,SHSEQR)\n#define LAPACK_dhseqr LAPACK_GLOBAL(dhseqr,DHSEQR)\n#define LAPACK_chseqr LAPACK_GLOBAL(chseqr,CHSEQR)\n#define LAPACK_zhseqr LAPACK_GLOBAL(zhseqr,ZHSEQR)\n#define LAPACK_shsein LAPACK_GLOBAL(shsein,SHSEIN)\n#define LAPACK_dhsein LAPACK_GLOBAL(dhsein,DHSEIN)\n#define LAPACK_chsein LAPACK_GLOBAL(chsein,CHSEIN)\n#define LAPACK_zhsein LAPACK_GLOBAL(zhsein,ZHSEIN)\n#define LAPACK_strevc LAPACK_GLOBAL(strevc,STREVC)\n#define LAPACK_dtrevc LAPACK_GLOBAL(dtrevc,DTREVC)\n#define LAPACK_ctrevc LAPACK_GLOBAL(ctrevc,CTREVC)\n#define LAPACK_ztrevc LAPACK_GLOBAL(ztrevc,ZTREVC)\n#define LAPACK_strsna LAPACK_GLOBAL(strsna,STRSNA)\n#define LAPACK_dtrsna LAPACK_GLOBAL(dtrsna,DTRSNA)\n#define LAPACK_ctrsna LAPACK_GLOBAL(ctrsna,CTRSNA)\n#define LAPACK_ztrsna LAPACK_GLOBAL(ztrsna,ZTRSNA)\n#define LAPACK_strexc LAPACK_GLOBAL(strexc,STREXC)\n#define LAPACK_dtrexc LAPACK_GLOBAL(dtrexc,DTREXC)\n#define LAPACK_ctrexc LAPACK_GLOBAL(ctrexc,CTREXC)\n#define LAPACK_ztrexc LAPACK_GLOBAL(ztrexc,ZTREXC)\n#define LAPACK_strsen LAPACK_GLOBAL(strsen,STRSEN)\n#define LAPACK_dtrsen LAPACK_GLOBAL(dtrsen,DTRSEN)\n#define LAPACK_ctrsen LAPACK_GLOBAL(ctrsen,CTRSEN)\n#define LAPACK_ztrsen LAPACK_GLOBAL(ztrsen,ZTRSEN)\n#define LAPACK_strsyl 
LAPACK_GLOBAL(strsyl,STRSYL)\n#define LAPACK_dtrsyl LAPACK_GLOBAL(dtrsyl,DTRSYL)\n#define LAPACK_ctrsyl LAPACK_GLOBAL(ctrsyl,CTRSYL)\n#define LAPACK_ztrsyl LAPACK_GLOBAL(ztrsyl,ZTRSYL)\n#define LAPACK_sgghrd LAPACK_GLOBAL(sgghrd,SGGHRD)\n#define LAPACK_dgghrd LAPACK_GLOBAL(dgghrd,DGGHRD)\n#define LAPACK_cgghrd LAPACK_GLOBAL(cgghrd,CGGHRD)\n#define LAPACK_zgghrd LAPACK_GLOBAL(zgghrd,ZGGHRD)\n#define LAPACK_sggbal LAPACK_GLOBAL(sggbal,SGGBAL)\n#define LAPACK_dggbal LAPACK_GLOBAL(dggbal,DGGBAL)\n#define LAPACK_cggbal LAPACK_GLOBAL(cggbal,CGGBAL)\n#define LAPACK_zggbal LAPACK_GLOBAL(zggbal,ZGGBAL)\n#define LAPACK_sggbak LAPACK_GLOBAL(sggbak,SGGBAK)\n#define LAPACK_dggbak LAPACK_GLOBAL(dggbak,DGGBAK)\n#define LAPACK_cggbak LAPACK_GLOBAL(cggbak,CGGBAK)\n#define LAPACK_zggbak LAPACK_GLOBAL(zggbak,ZGGBAK)\n#define LAPACK_shgeqz LAPACK_GLOBAL(shgeqz,SHGEQZ)\n#define LAPACK_dhgeqz LAPACK_GLOBAL(dhgeqz,DHGEQZ)\n#define LAPACK_chgeqz LAPACK_GLOBAL(chgeqz,CHGEQZ)\n#define LAPACK_zhgeqz LAPACK_GLOBAL(zhgeqz,ZHGEQZ)\n#define LAPACK_stgevc LAPACK_GLOBAL(stgevc,STGEVC)\n#define LAPACK_dtgevc LAPACK_GLOBAL(dtgevc,DTGEVC)\n#define LAPACK_ctgevc LAPACK_GLOBAL(ctgevc,CTGEVC)\n#define LAPACK_ztgevc LAPACK_GLOBAL(ztgevc,ZTGEVC)\n#define LAPACK_stgexc LAPACK_GLOBAL(stgexc,STGEXC)\n#define LAPACK_dtgexc LAPACK_GLOBAL(dtgexc,DTGEXC)\n#define LAPACK_ctgexc LAPACK_GLOBAL(ctgexc,CTGEXC)\n#define LAPACK_ztgexc LAPACK_GLOBAL(ztgexc,ZTGEXC)\n#define LAPACK_stgsen LAPACK_GLOBAL(stgsen,STGSEN)\n#define LAPACK_dtgsen LAPACK_GLOBAL(dtgsen,DTGSEN)\n#define LAPACK_ctgsen LAPACK_GLOBAL(ctgsen,CTGSEN)\n#define LAPACK_ztgsen LAPACK_GLOBAL(ztgsen,ZTGSEN)\n#define LAPACK_stgsyl LAPACK_GLOBAL(stgsyl,STGSYL)\n#define LAPACK_dtgsyl LAPACK_GLOBAL(dtgsyl,DTGSYL)\n#define LAPACK_ctgsyl LAPACK_GLOBAL(ctgsyl,CTGSYL)\n#define LAPACK_ztgsyl LAPACK_GLOBAL(ztgsyl,ZTGSYL)\n#define LAPACK_stgsna LAPACK_GLOBAL(stgsna,STGSNA)\n#define LAPACK_dtgsna LAPACK_GLOBAL(dtgsna,DTGSNA)\n#define LAPACK_ctgsna 
LAPACK_GLOBAL(ctgsna,CTGSNA)\n#define LAPACK_ztgsna LAPACK_GLOBAL(ztgsna,ZTGSNA)\n#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP)\n#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP)\n#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP)\n#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP)\n#define LAPACK_stgsja LAPACK_GLOBAL(stgsja,STGSJA)\n#define LAPACK_dtgsja LAPACK_GLOBAL(dtgsja,DTGSJA)\n#define LAPACK_ctgsja LAPACK_GLOBAL(ctgsja,CTGSJA)\n#define LAPACK_ztgsja LAPACK_GLOBAL(ztgsja,ZTGSJA)\n#define LAPACK_sgels LAPACK_GLOBAL(sgels,SGELS)\n#define LAPACK_dgels LAPACK_GLOBAL(dgels,DGELS)\n#define LAPACK_cgels LAPACK_GLOBAL(cgels,CGELS)\n#define LAPACK_zgels LAPACK_GLOBAL(zgels,ZGELS)\n#define LAPACK_sgelsy LAPACK_GLOBAL(sgelsy,SGELSY)\n#define LAPACK_dgelsy LAPACK_GLOBAL(dgelsy,DGELSY)\n#define LAPACK_cgelsy LAPACK_GLOBAL(cgelsy,CGELSY)\n#define LAPACK_zgelsy LAPACK_GLOBAL(zgelsy,ZGELSY)\n#define LAPACK_sgelss LAPACK_GLOBAL(sgelss,SGELSS)\n#define LAPACK_dgelss LAPACK_GLOBAL(dgelss,DGELSS)\n#define LAPACK_cgelss LAPACK_GLOBAL(cgelss,CGELSS)\n#define LAPACK_zgelss LAPACK_GLOBAL(zgelss,ZGELSS)\n#define LAPACK_sgelsd LAPACK_GLOBAL(sgelsd,SGELSD)\n#define LAPACK_dgelsd LAPACK_GLOBAL(dgelsd,DGELSD)\n#define LAPACK_cgelsd LAPACK_GLOBAL(cgelsd,CGELSD)\n#define LAPACK_zgelsd LAPACK_GLOBAL(zgelsd,ZGELSD)\n#define LAPACK_sgglse LAPACK_GLOBAL(sgglse,SGGLSE)\n#define LAPACK_dgglse LAPACK_GLOBAL(dgglse,DGGLSE)\n#define LAPACK_cgglse LAPACK_GLOBAL(cgglse,CGGLSE)\n#define LAPACK_zgglse LAPACK_GLOBAL(zgglse,ZGGLSE)\n#define LAPACK_sggglm LAPACK_GLOBAL(sggglm,SGGGLM)\n#define LAPACK_dggglm LAPACK_GLOBAL(dggglm,DGGGLM)\n#define LAPACK_cggglm LAPACK_GLOBAL(cggglm,CGGGLM)\n#define LAPACK_zggglm LAPACK_GLOBAL(zggglm,ZGGGLM)\n#define LAPACK_ssyev LAPACK_GLOBAL(ssyev,SSYEV)\n#define LAPACK_dsyev LAPACK_GLOBAL(dsyev,DSYEV)\n#define LAPACK_cheev LAPACK_GLOBAL(cheev,CHEEV)\n#define LAPACK_zheev LAPACK_GLOBAL(zheev,ZHEEV)\n#define LAPACK_ssyevd LAPACK_GLOBAL(ssyevd,SSYEVD)\n#define 
LAPACK_dsyevd LAPACK_GLOBAL(dsyevd,DSYEVD)\n#define LAPACK_cheevd LAPACK_GLOBAL(cheevd,CHEEVD)\n#define LAPACK_zheevd LAPACK_GLOBAL(zheevd,ZHEEVD)\n#define LAPACK_ssyevx LAPACK_GLOBAL(ssyevx,SSYEVX)\n#define LAPACK_dsyevx LAPACK_GLOBAL(dsyevx,DSYEVX)\n#define LAPACK_cheevx LAPACK_GLOBAL(cheevx,CHEEVX)\n#define LAPACK_zheevx LAPACK_GLOBAL(zheevx,ZHEEVX)\n#define LAPACK_ssyevr LAPACK_GLOBAL(ssyevr,SSYEVR)\n#define LAPACK_dsyevr LAPACK_GLOBAL(dsyevr,DSYEVR)\n#define LAPACK_cheevr LAPACK_GLOBAL(cheevr,CHEEVR)\n#define LAPACK_zheevr LAPACK_GLOBAL(zheevr,ZHEEVR)\n#define LAPACK_sspev LAPACK_GLOBAL(sspev,SSPEV)\n#define LAPACK_dspev LAPACK_GLOBAL(dspev,DSPEV)\n#define LAPACK_chpev LAPACK_GLOBAL(chpev,CHPEV)\n#define LAPACK_zhpev LAPACK_GLOBAL(zhpev,ZHPEV)\n#define LAPACK_sspevd LAPACK_GLOBAL(sspevd,SSPEVD)\n#define LAPACK_dspevd LAPACK_GLOBAL(dspevd,DSPEVD)\n#define LAPACK_chpevd LAPACK_GLOBAL(chpevd,CHPEVD)\n#define LAPACK_zhpevd LAPACK_GLOBAL(zhpevd,ZHPEVD)\n#define LAPACK_sspevx LAPACK_GLOBAL(sspevx,SSPEVX)\n#define LAPACK_dspevx LAPACK_GLOBAL(dspevx,DSPEVX)\n#define LAPACK_chpevx LAPACK_GLOBAL(chpevx,CHPEVX)\n#define LAPACK_zhpevx LAPACK_GLOBAL(zhpevx,ZHPEVX)\n#define LAPACK_ssbev LAPACK_GLOBAL(ssbev,SSBEV)\n#define LAPACK_dsbev LAPACK_GLOBAL(dsbev,DSBEV)\n#define LAPACK_chbev LAPACK_GLOBAL(chbev,CHBEV)\n#define LAPACK_zhbev LAPACK_GLOBAL(zhbev,ZHBEV)\n#define LAPACK_ssbevd LAPACK_GLOBAL(ssbevd,SSBEVD)\n#define LAPACK_dsbevd LAPACK_GLOBAL(dsbevd,DSBEVD)\n#define LAPACK_chbevd LAPACK_GLOBAL(chbevd,CHBEVD)\n#define LAPACK_zhbevd LAPACK_GLOBAL(zhbevd,ZHBEVD)\n#define LAPACK_ssbevx LAPACK_GLOBAL(ssbevx,SSBEVX)\n#define LAPACK_dsbevx LAPACK_GLOBAL(dsbevx,DSBEVX)\n#define LAPACK_chbevx LAPACK_GLOBAL(chbevx,CHBEVX)\n#define LAPACK_zhbevx LAPACK_GLOBAL(zhbevx,ZHBEVX)\n#define LAPACK_sstev LAPACK_GLOBAL(sstev,SSTEV)\n#define LAPACK_dstev LAPACK_GLOBAL(dstev,DSTEV)\n#define LAPACK_sstevd LAPACK_GLOBAL(sstevd,SSTEVD)\n#define LAPACK_dstevd LAPACK_GLOBAL(dstevd,DSTEVD)\n#define 
LAPACK_sstevx LAPACK_GLOBAL(sstevx,SSTEVX)\n#define LAPACK_dstevx LAPACK_GLOBAL(dstevx,DSTEVX)\n#define LAPACK_sstevr LAPACK_GLOBAL(sstevr,SSTEVR)\n#define LAPACK_dstevr LAPACK_GLOBAL(dstevr,DSTEVR)\n#define LAPACK_sgees LAPACK_GLOBAL(sgees,SGEES)\n#define LAPACK_dgees LAPACK_GLOBAL(dgees,DGEES)\n#define LAPACK_cgees LAPACK_GLOBAL(cgees,CGEES)\n#define LAPACK_zgees LAPACK_GLOBAL(zgees,ZGEES)\n#define LAPACK_sgeesx LAPACK_GLOBAL(sgeesx,SGEESX)\n#define LAPACK_dgeesx LAPACK_GLOBAL(dgeesx,DGEESX)\n#define LAPACK_cgeesx LAPACK_GLOBAL(cgeesx,CGEESX)\n#define LAPACK_zgeesx LAPACK_GLOBAL(zgeesx,ZGEESX)\n#define LAPACK_sgeev LAPACK_GLOBAL(sgeev,SGEEV)\n#define LAPACK_dgeev LAPACK_GLOBAL(dgeev,DGEEV)\n#define LAPACK_cgeev LAPACK_GLOBAL(cgeev,CGEEV)\n#define LAPACK_zgeev LAPACK_GLOBAL(zgeev,ZGEEV)\n#define LAPACK_sgeevx LAPACK_GLOBAL(sgeevx,SGEEVX)\n#define LAPACK_dgeevx LAPACK_GLOBAL(dgeevx,DGEEVX)\n#define LAPACK_cgeevx LAPACK_GLOBAL(cgeevx,CGEEVX)\n#define LAPACK_zgeevx LAPACK_GLOBAL(zgeevx,ZGEEVX)\n#define LAPACK_sgesvd LAPACK_GLOBAL(sgesvd,SGESVD)\n#define LAPACK_dgesvd LAPACK_GLOBAL(dgesvd,DGESVD)\n#define LAPACK_cgesvd LAPACK_GLOBAL(cgesvd,CGESVD)\n#define LAPACK_zgesvd LAPACK_GLOBAL(zgesvd,ZGESVD)\n#define LAPACK_sgesdd LAPACK_GLOBAL(sgesdd,SGESDD)\n#define LAPACK_dgesdd LAPACK_GLOBAL(dgesdd,DGESDD)\n#define LAPACK_cgesdd LAPACK_GLOBAL(cgesdd,CGESDD)\n#define LAPACK_zgesdd LAPACK_GLOBAL(zgesdd,ZGESDD)\n#define LAPACK_dgejsv LAPACK_GLOBAL(dgejsv,DGEJSV)\n#define LAPACK_sgejsv LAPACK_GLOBAL(sgejsv,SGEJSV)\n#define LAPACK_dgesvj LAPACK_GLOBAL(dgesvj,DGESVJ)\n#define LAPACK_sgesvj LAPACK_GLOBAL(sgesvj,SGESVJ)\n#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD)\n#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD)\n#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD)\n#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD)\n#define LAPACK_ssygv LAPACK_GLOBAL(ssygv,SSYGV)\n#define LAPACK_dsygv LAPACK_GLOBAL(dsygv,DSYGV)\n#define LAPACK_chegv LAPACK_GLOBAL(chegv,CHEGV)\n#define 
LAPACK_zhegv LAPACK_GLOBAL(zhegv,ZHEGV)\n#define LAPACK_ssygvd LAPACK_GLOBAL(ssygvd,SSYGVD)\n#define LAPACK_dsygvd LAPACK_GLOBAL(dsygvd,DSYGVD)\n#define LAPACK_chegvd LAPACK_GLOBAL(chegvd,CHEGVD)\n#define LAPACK_zhegvd LAPACK_GLOBAL(zhegvd,ZHEGVD)\n#define LAPACK_ssygvx LAPACK_GLOBAL(ssygvx,SSYGVX)\n#define LAPACK_dsygvx LAPACK_GLOBAL(dsygvx,DSYGVX)\n#define LAPACK_chegvx LAPACK_GLOBAL(chegvx,CHEGVX)\n#define LAPACK_zhegvx LAPACK_GLOBAL(zhegvx,ZHEGVX)\n#define LAPACK_sspgv LAPACK_GLOBAL(sspgv,SSPGV)\n#define LAPACK_dspgv LAPACK_GLOBAL(dspgv,DSPGV)\n#define LAPACK_chpgv LAPACK_GLOBAL(chpgv,CHPGV)\n#define LAPACK_zhpgv LAPACK_GLOBAL(zhpgv,ZHPGV)\n#define LAPACK_sspgvd LAPACK_GLOBAL(sspgvd,SSPGVD)\n#define LAPACK_dspgvd LAPACK_GLOBAL(dspgvd,DSPGVD)\n#define LAPACK_chpgvd LAPACK_GLOBAL(chpgvd,CHPGVD)\n#define LAPACK_zhpgvd LAPACK_GLOBAL(zhpgvd,ZHPGVD)\n#define LAPACK_sspgvx LAPACK_GLOBAL(sspgvx,SSPGVX)\n#define LAPACK_dspgvx LAPACK_GLOBAL(dspgvx,DSPGVX)\n#define LAPACK_chpgvx LAPACK_GLOBAL(chpgvx,CHPGVX)\n#define LAPACK_zhpgvx LAPACK_GLOBAL(zhpgvx,ZHPGVX)\n#define LAPACK_ssbgv LAPACK_GLOBAL(ssbgv,SSBGV)\n#define LAPACK_dsbgv LAPACK_GLOBAL(dsbgv,DSBGV)\n#define LAPACK_chbgv LAPACK_GLOBAL(chbgv,CHBGV)\n#define LAPACK_zhbgv LAPACK_GLOBAL(zhbgv,ZHBGV)\n#define LAPACK_ssbgvd LAPACK_GLOBAL(ssbgvd,SSBGVD)\n#define LAPACK_dsbgvd LAPACK_GLOBAL(dsbgvd,DSBGVD)\n#define LAPACK_chbgvd LAPACK_GLOBAL(chbgvd,CHBGVD)\n#define LAPACK_zhbgvd LAPACK_GLOBAL(zhbgvd,ZHBGVD)\n#define LAPACK_ssbgvx LAPACK_GLOBAL(ssbgvx,SSBGVX)\n#define LAPACK_dsbgvx LAPACK_GLOBAL(dsbgvx,DSBGVX)\n#define LAPACK_chbgvx LAPACK_GLOBAL(chbgvx,CHBGVX)\n#define LAPACK_zhbgvx LAPACK_GLOBAL(zhbgvx,ZHBGVX)\n#define LAPACK_sgges LAPACK_GLOBAL(sgges,SGGES)\n#define LAPACK_dgges LAPACK_GLOBAL(dgges,DGGES)\n#define LAPACK_cgges LAPACK_GLOBAL(cgges,CGGES)\n#define LAPACK_zgges LAPACK_GLOBAL(zgges,ZGGES)\n#define LAPACK_sggesx LAPACK_GLOBAL(sggesx,SGGESX)\n#define LAPACK_dggesx LAPACK_GLOBAL(dggesx,DGGESX)\n#define 
LAPACK_cggesx LAPACK_GLOBAL(cggesx,CGGESX)\n#define LAPACK_zggesx LAPACK_GLOBAL(zggesx,ZGGESX)\n#define LAPACK_sggev LAPACK_GLOBAL(sggev,SGGEV)\n#define LAPACK_dggev LAPACK_GLOBAL(dggev,DGGEV)\n#define LAPACK_cggev LAPACK_GLOBAL(cggev,CGGEV)\n#define LAPACK_zggev LAPACK_GLOBAL(zggev,ZGGEV)\n#define LAPACK_sggevx LAPACK_GLOBAL(sggevx,SGGEVX)\n#define LAPACK_dggevx LAPACK_GLOBAL(dggevx,DGGEVX)\n#define LAPACK_cggevx LAPACK_GLOBAL(cggevx,CGGEVX)\n#define LAPACK_zggevx LAPACK_GLOBAL(zggevx,ZGGEVX)\n#define LAPACK_dsfrk LAPACK_GLOBAL(dsfrk,DSFRK)\n#define LAPACK_ssfrk LAPACK_GLOBAL(ssfrk,SSFRK)\n#define LAPACK_zhfrk LAPACK_GLOBAL(zhfrk,ZHFRK)\n#define LAPACK_chfrk LAPACK_GLOBAL(chfrk,CHFRK)\n#define LAPACK_dtfsm LAPACK_GLOBAL(dtfsm,DTFSM)\n#define LAPACK_stfsm LAPACK_GLOBAL(stfsm,STFSM)\n#define LAPACK_ztfsm LAPACK_GLOBAL(ztfsm,ZTFSM)\n#define LAPACK_ctfsm LAPACK_GLOBAL(ctfsm,CTFSM)\n#define LAPACK_dtfttp LAPACK_GLOBAL(dtfttp,DTFTTP)\n#define LAPACK_stfttp LAPACK_GLOBAL(stfttp,STFTTP)\n#define LAPACK_ztfttp LAPACK_GLOBAL(ztfttp,ZTFTTP)\n#define LAPACK_ctfttp LAPACK_GLOBAL(ctfttp,CTFTTP)\n#define LAPACK_dtfttr LAPACK_GLOBAL(dtfttr,DTFTTR)\n#define LAPACK_stfttr LAPACK_GLOBAL(stfttr,STFTTR)\n#define LAPACK_ztfttr LAPACK_GLOBAL(ztfttr,ZTFTTR)\n#define LAPACK_ctfttr LAPACK_GLOBAL(ctfttr,CTFTTR)\n#define LAPACK_dtpttf LAPACK_GLOBAL(dtpttf,DTPTTF)\n#define LAPACK_stpttf LAPACK_GLOBAL(stpttf,STPTTF)\n#define LAPACK_ztpttf LAPACK_GLOBAL(ztpttf,ZTPTTF)\n#define LAPACK_ctpttf LAPACK_GLOBAL(ctpttf,CTPTTF)\n#define LAPACK_dtpttr LAPACK_GLOBAL(dtpttr,DTPTTR)\n#define LAPACK_stpttr LAPACK_GLOBAL(stpttr,STPTTR)\n#define LAPACK_ztpttr LAPACK_GLOBAL(ztpttr,ZTPTTR)\n#define LAPACK_ctpttr LAPACK_GLOBAL(ctpttr,CTPTTR)\n#define LAPACK_dtrttf LAPACK_GLOBAL(dtrttf,DTRTTF)\n#define LAPACK_strttf LAPACK_GLOBAL(strttf,STRTTF)\n#define LAPACK_ztrttf LAPACK_GLOBAL(ztrttf,ZTRTTF)\n#define LAPACK_ctrttf LAPACK_GLOBAL(ctrttf,CTRTTF)\n#define LAPACK_dtrttp LAPACK_GLOBAL(dtrttp,DTRTTP)\n#define 
LAPACK_strttp LAPACK_GLOBAL(strttp,STRTTP)\n#define LAPACK_ztrttp LAPACK_GLOBAL(ztrttp,ZTRTTP)\n#define LAPACK_ctrttp LAPACK_GLOBAL(ctrttp,CTRTTP)\n#define LAPACK_sgeqrfp LAPACK_GLOBAL(sgeqrfp,SGEQRFP)\n#define LAPACK_dgeqrfp LAPACK_GLOBAL(dgeqrfp,DGEQRFP)\n#define LAPACK_cgeqrfp LAPACK_GLOBAL(cgeqrfp,CGEQRFP)\n#define LAPACK_zgeqrfp LAPACK_GLOBAL(zgeqrfp,ZGEQRFP)\n#define LAPACK_clacgv LAPACK_GLOBAL(clacgv,CLACGV)\n#define LAPACK_zlacgv LAPACK_GLOBAL(zlacgv,ZLACGV)\n#define LAPACK_slarnv LAPACK_GLOBAL(slarnv,SLARNV)\n#define LAPACK_dlarnv LAPACK_GLOBAL(dlarnv,DLARNV)\n#define LAPACK_clarnv LAPACK_GLOBAL(clarnv,CLARNV)\n#define LAPACK_zlarnv LAPACK_GLOBAL(zlarnv,ZLARNV)\n#define LAPACK_sgeqr2 LAPACK_GLOBAL(sgeqr2,SGEQR2)\n#define LAPACK_dgeqr2 LAPACK_GLOBAL(dgeqr2,DGEQR2)\n#define LAPACK_cgeqr2 LAPACK_GLOBAL(cgeqr2,CGEQR2)\n#define LAPACK_zgeqr2 LAPACK_GLOBAL(zgeqr2,ZGEQR2)\n#define LAPACK_slacn2 LAPACK_GLOBAL(slacn2,SLACN2)\n#define LAPACK_dlacn2 LAPACK_GLOBAL(dlacn2,DLACN2)\n#define LAPACK_clacn2 LAPACK_GLOBAL(clacn2,CLACN2)\n#define LAPACK_zlacn2 LAPACK_GLOBAL(zlacn2,ZLACN2)\n#define LAPACK_slacpy LAPACK_GLOBAL(slacpy,SLACPY)\n#define LAPACK_dlacpy LAPACK_GLOBAL(dlacpy,DLACPY)\n#define LAPACK_clacpy LAPACK_GLOBAL(clacpy,CLACPY)\n#define LAPACK_zlacpy LAPACK_GLOBAL(zlacpy,ZLACPY)\n#define LAPACK_clacp2 LAPACK_GLOBAL(clacp2,CLACP2)\n#define LAPACK_zlacp2 LAPACK_GLOBAL(zlacp2,ZLACP2)\n#define LAPACK_sgetf2 LAPACK_GLOBAL(sgetf2,SGETF2)\n#define LAPACK_dgetf2 LAPACK_GLOBAL(dgetf2,DGETF2)\n#define LAPACK_cgetf2 LAPACK_GLOBAL(cgetf2,CGETF2)\n#define LAPACK_zgetf2 LAPACK_GLOBAL(zgetf2,ZGETF2)\n#define LAPACK_slaswp LAPACK_GLOBAL(slaswp,SLASWP)\n#define LAPACK_dlaswp LAPACK_GLOBAL(dlaswp,DLASWP)\n#define LAPACK_claswp LAPACK_GLOBAL(claswp,CLASWP)\n#define LAPACK_zlaswp LAPACK_GLOBAL(zlaswp,ZLASWP)\n#define LAPACK_slange LAPACK_GLOBAL(slange,SLANGE)\n#define LAPACK_dlange LAPACK_GLOBAL(dlange,DLANGE)\n#define LAPACK_clange LAPACK_GLOBAL(clange,CLANGE)\n#define 
LAPACK_zlange LAPACK_GLOBAL(zlange,ZLANGE)\n#define LAPACK_clanhe LAPACK_GLOBAL(clanhe,CLANHE)\n#define LAPACK_zlanhe LAPACK_GLOBAL(zlanhe,ZLANHE)\n#define LAPACK_slansy LAPACK_GLOBAL(slansy,SLANSY)\n#define LAPACK_dlansy LAPACK_GLOBAL(dlansy,DLANSY)\n#define LAPACK_clansy LAPACK_GLOBAL(clansy,CLANSY)\n#define LAPACK_zlansy LAPACK_GLOBAL(zlansy,ZLANSY)\n#define LAPACK_slantr LAPACK_GLOBAL(slantr,SLANTR)\n#define LAPACK_dlantr LAPACK_GLOBAL(dlantr,DLANTR)\n#define LAPACK_clantr LAPACK_GLOBAL(clantr,CLANTR)\n#define LAPACK_zlantr LAPACK_GLOBAL(zlantr,ZLANTR)\n#define LAPACK_slamch LAPACK_GLOBAL(slamch,SLAMCH)\n#define LAPACK_dlamch LAPACK_GLOBAL(dlamch,DLAMCH)\n#define LAPACK_sgelq2 LAPACK_GLOBAL(sgelq2,SGELQ2)\n#define LAPACK_dgelq2 LAPACK_GLOBAL(dgelq2,DGELQ2)\n#define LAPACK_cgelq2 LAPACK_GLOBAL(cgelq2,CGELQ2)\n#define LAPACK_zgelq2 LAPACK_GLOBAL(zgelq2,ZGELQ2)\n#define LAPACK_slarfb LAPACK_GLOBAL(slarfb,SLARFB)\n#define LAPACK_dlarfb LAPACK_GLOBAL(dlarfb,DLARFB)\n#define LAPACK_clarfb LAPACK_GLOBAL(clarfb,CLARFB)\n#define LAPACK_zlarfb LAPACK_GLOBAL(zlarfb,ZLARFB)\n#define LAPACK_slarfg LAPACK_GLOBAL(slarfg,SLARFG)\n#define LAPACK_dlarfg LAPACK_GLOBAL(dlarfg,DLARFG)\n#define LAPACK_clarfg LAPACK_GLOBAL(clarfg,CLARFG)\n#define LAPACK_zlarfg LAPACK_GLOBAL(zlarfg,ZLARFG)\n#define LAPACK_slarft LAPACK_GLOBAL(slarft,SLARFT)\n#define LAPACK_dlarft LAPACK_GLOBAL(dlarft,DLARFT)\n#define LAPACK_clarft LAPACK_GLOBAL(clarft,CLARFT)\n#define LAPACK_zlarft LAPACK_GLOBAL(zlarft,ZLARFT)\n#define LAPACK_slarfx LAPACK_GLOBAL(slarfx,SLARFX)\n#define LAPACK_dlarfx LAPACK_GLOBAL(dlarfx,DLARFX)\n#define LAPACK_clarfx LAPACK_GLOBAL(clarfx,CLARFX)\n#define LAPACK_zlarfx LAPACK_GLOBAL(zlarfx,ZLARFX)\n#define LAPACK_slatms LAPACK_GLOBAL(slatms,SLATMS)\n#define LAPACK_dlatms LAPACK_GLOBAL(dlatms,DLATMS)\n#define LAPACK_clatms LAPACK_GLOBAL(clatms,CLATMS)\n#define LAPACK_zlatms LAPACK_GLOBAL(zlatms,ZLATMS)\n#define LAPACK_slag2d LAPACK_GLOBAL(slag2d,SLAG2D)\n#define LAPACK_dlag2s 
LAPACK_GLOBAL(dlag2s,DLAG2S)\n#define LAPACK_clag2z LAPACK_GLOBAL(clag2z,CLAG2Z)\n#define LAPACK_zlag2c LAPACK_GLOBAL(zlag2c,ZLAG2C)\n#define LAPACK_slauum LAPACK_GLOBAL(slauum,SLAUUM)\n#define LAPACK_dlauum LAPACK_GLOBAL(dlauum,DLAUUM)\n#define LAPACK_clauum LAPACK_GLOBAL(clauum,CLAUUM)\n#define LAPACK_zlauum LAPACK_GLOBAL(zlauum,ZLAUUM)\n#define LAPACK_slagge LAPACK_GLOBAL(slagge,SLAGGE)\n#define LAPACK_dlagge LAPACK_GLOBAL(dlagge,DLAGGE)\n#define LAPACK_clagge LAPACK_GLOBAL(clagge,CLAGGE)\n#define LAPACK_zlagge LAPACK_GLOBAL(zlagge,ZLAGGE)\n#define LAPACK_slaset LAPACK_GLOBAL(slaset,SLASET)\n#define LAPACK_dlaset LAPACK_GLOBAL(dlaset,DLASET)\n#define LAPACK_claset LAPACK_GLOBAL(claset,CLASET)\n#define LAPACK_zlaset LAPACK_GLOBAL(zlaset,ZLASET)\n#define LAPACK_slasrt LAPACK_GLOBAL(slasrt,SLASRT)\n#define LAPACK_dlasrt LAPACK_GLOBAL(dlasrt,DLASRT)\n#define LAPACK_slagsy LAPACK_GLOBAL(slagsy,SLAGSY)\n#define LAPACK_dlagsy LAPACK_GLOBAL(dlagsy,DLAGSY)\n#define LAPACK_clagsy LAPACK_GLOBAL(clagsy,CLAGSY)\n#define LAPACK_zlagsy LAPACK_GLOBAL(zlagsy,ZLAGSY)\n#define LAPACK_claghe LAPACK_GLOBAL(claghe,CLAGHE)\n#define LAPACK_zlaghe LAPACK_GLOBAL(zlaghe,ZLAGHE)\n#define LAPACK_slapmr LAPACK_GLOBAL(slapmr,SLAPMR)\n#define LAPACK_dlapmr LAPACK_GLOBAL(dlapmr,DLAPMR)\n#define LAPACK_clapmr LAPACK_GLOBAL(clapmr,CLAPMR)\n#define LAPACK_zlapmr LAPACK_GLOBAL(zlapmr,ZLAPMR)\n#define LAPACK_slapy2 LAPACK_GLOBAL(slapy2,SLAPY2)\n#define LAPACK_dlapy2 LAPACK_GLOBAL(dlapy2,DLAPY2)\n#define LAPACK_slapy3 LAPACK_GLOBAL(slapy3,SLAPY3)\n#define LAPACK_dlapy3 LAPACK_GLOBAL(dlapy3,DLAPY3)\n#define LAPACK_slartgp LAPACK_GLOBAL(slartgp,SLARTGP)\n#define LAPACK_dlartgp LAPACK_GLOBAL(dlartgp,DLARTGP)\n#define LAPACK_slartgs LAPACK_GLOBAL(slartgs,SLARTGS)\n#define LAPACK_dlartgs LAPACK_GLOBAL(dlartgs,DLARTGS)\n// LAPACK 3.3.0\n#define LAPACK_cbbcsd LAPACK_GLOBAL(cbbcsd,CBBCSD)\n#define LAPACK_cheswapr LAPACK_GLOBAL(cheswapr,CHESWAPR)\n#define LAPACK_chetri2 LAPACK_GLOBAL(chetri2,CHETRI2)\n#define 
LAPACK_chetri2x LAPACK_GLOBAL(chetri2x,CHETRI2X)\n#define LAPACK_chetrs2 LAPACK_GLOBAL(chetrs2,CHETRS2)\n#define LAPACK_csyconv LAPACK_GLOBAL(csyconv,CSYCONV)\n#define LAPACK_csyswapr LAPACK_GLOBAL(csyswapr,CSYSWAPR)\n#define LAPACK_csytri2 LAPACK_GLOBAL(csytri2,CSYTRI2)\n#define LAPACK_csytri2x LAPACK_GLOBAL(csytri2x,CSYTRI2X)\n#define LAPACK_csytrs2 LAPACK_GLOBAL(csytrs2,CSYTRS2)\n#define LAPACK_cunbdb LAPACK_GLOBAL(cunbdb,CUNBDB)\n#define LAPACK_cuncsd LAPACK_GLOBAL(cuncsd,CUNCSD)\n#define LAPACK_dbbcsd LAPACK_GLOBAL(dbbcsd,DBBCSD)\n#define LAPACK_dorbdb LAPACK_GLOBAL(dorbdb,DORBDB)\n#define LAPACK_dorcsd LAPACK_GLOBAL(dorcsd,DORCSD)\n#define LAPACK_dsyconv LAPACK_GLOBAL(dsyconv,DSYCONV)\n#define LAPACK_dsyswapr LAPACK_GLOBAL(dsyswapr,DSYSWAPR)\n#define LAPACK_dsytri2 LAPACK_GLOBAL(dsytri2,DSYTRI2)\n#define LAPACK_dsytri2x LAPACK_GLOBAL(dsytri2x,DSYTRI2X)\n#define LAPACK_dsytrs2 LAPACK_GLOBAL(dsytrs2,DSYTRS2)\n#define LAPACK_sbbcsd LAPACK_GLOBAL(sbbcsd,SBBCSD)\n#define LAPACK_sorbdb LAPACK_GLOBAL(sorbdb,SORBDB)\n#define LAPACK_sorcsd LAPACK_GLOBAL(sorcsd,SORCSD)\n#define LAPACK_ssyconv LAPACK_GLOBAL(ssyconv,SSYCONV)\n#define LAPACK_ssyswapr LAPACK_GLOBAL(ssyswapr,SSYSWAPR)\n#define LAPACK_ssytri2 LAPACK_GLOBAL(ssytri2,SSYTRI2)\n#define LAPACK_ssytri2x LAPACK_GLOBAL(ssytri2x,SSYTRI2X)\n#define LAPACK_ssytrs2 LAPACK_GLOBAL(ssytrs2,SSYTRS2)\n#define LAPACK_zbbcsd LAPACK_GLOBAL(zbbcsd,ZBBCSD)\n#define LAPACK_zheswapr LAPACK_GLOBAL(zheswapr,ZHESWAPR)\n#define LAPACK_zhetri2 LAPACK_GLOBAL(zhetri2,ZHETRI2)\n#define LAPACK_zhetri2x LAPACK_GLOBAL(zhetri2x,ZHETRI2X)\n#define LAPACK_zhetrs2 LAPACK_GLOBAL(zhetrs2,ZHETRS2)\n#define LAPACK_zsyconv LAPACK_GLOBAL(zsyconv,ZSYCONV)\n#define LAPACK_zsyswapr LAPACK_GLOBAL(zsyswapr,ZSYSWAPR)\n#define LAPACK_zsytri2 LAPACK_GLOBAL(zsytri2,ZSYTRI2)\n#define LAPACK_zsytri2x LAPACK_GLOBAL(zsytri2x,ZSYTRI2X)\n#define LAPACK_zsytrs2 LAPACK_GLOBAL(zsytrs2,ZSYTRS2)\n#define LAPACK_zunbdb LAPACK_GLOBAL(zunbdb,ZUNBDB)\n#define LAPACK_zuncsd 
LAPACK_GLOBAL(zuncsd,ZUNCSD)\n// LAPACK 3.4.0\n#define LAPACK_sgemqrt LAPACK_GLOBAL(sgemqrt,SGEMQRT)\n#define LAPACK_dgemqrt LAPACK_GLOBAL(dgemqrt,DGEMQRT)\n#define LAPACK_cgemqrt LAPACK_GLOBAL(cgemqrt,CGEMQRT)\n#define LAPACK_zgemqrt LAPACK_GLOBAL(zgemqrt,ZGEMQRT)\n#define LAPACK_sgeqrt LAPACK_GLOBAL(sgeqrt,SGEQRT)\n#define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT)\n#define LAPACK_cgeqrt LAPACK_GLOBAL(cgeqrt,CGEQRT)\n#define LAPACK_zgeqrt LAPACK_GLOBAL(zgeqrt,ZGEQRT)\n#define LAPACK_sgeqrt2 LAPACK_GLOBAL(sgeqrt2,SGEQRT2)\n#define LAPACK_dgeqrt2 LAPACK_GLOBAL(dgeqrt2,DGEQRT2)\n#define LAPACK_cgeqrt2 LAPACK_GLOBAL(cgeqrt2,CGEQRT2)\n#define LAPACK_zgeqrt2 LAPACK_GLOBAL(zgeqrt2,ZGEQRT2)\n#define LAPACK_sgeqrt3 LAPACK_GLOBAL(sgeqrt3,SGEQRT3)\n#define LAPACK_dgeqrt3 LAPACK_GLOBAL(dgeqrt3,DGEQRT3)\n#define LAPACK_cgeqrt3 LAPACK_GLOBAL(cgeqrt3,CGEQRT3)\n#define LAPACK_zgeqrt3 LAPACK_GLOBAL(zgeqrt3,ZGEQRT3)\n#define LAPACK_stpmqrt LAPACK_GLOBAL(stpmqrt,STPMQRT)\n#define LAPACK_dtpmqrt LAPACK_GLOBAL(dtpmqrt,DTPMQRT)\n#define LAPACK_ctpmqrt LAPACK_GLOBAL(ctpmqrt,CTPMQRT)\n#define LAPACK_ztpmqrt LAPACK_GLOBAL(ztpmqrt,ZTPMQRT)\n#define LAPACK_dtpqrt LAPACK_GLOBAL(dtpqrt,DTPQRT)\n#define LAPACK_ctpqrt LAPACK_GLOBAL(ctpqrt,CTPQRT)\n#define LAPACK_ztpqrt LAPACK_GLOBAL(ztpqrt,ZTPQRT)\n#define LAPACK_stpqrt2 LAPACK_GLOBAL(stpqrt2,STPQRT2)\n#define LAPACK_dtpqrt2 LAPACK_GLOBAL(dtpqrt2,DTPQRT2)\n#define LAPACK_ctpqrt2 LAPACK_GLOBAL(ctpqrt2,CTPQRT2)\n#define LAPACK_ztpqrt2 LAPACK_GLOBAL(ztpqrt2,ZTPQRT2)\n#define LAPACK_stprfb LAPACK_GLOBAL(stprfb,STPRFB)\n#define LAPACK_dtprfb LAPACK_GLOBAL(dtprfb,DTPRFB)\n#define LAPACK_ctprfb LAPACK_GLOBAL(ctprfb,CTPRFB)\n#define LAPACK_ztprfb LAPACK_GLOBAL(ztprfb,ZTPRFB)\n// LAPACK 3.X.X\n#define LAPACK_ssysv_rook LAPACK_GLOBAL(ssysv_rook,SSYSV_ROOK)\n#define LAPACK_dsysv_rook LAPACK_GLOBAL(dsysv_rook,DSYSV_ROOK)\n#define LAPACK_csysv_rook LAPACK_GLOBAL(csysv_rook,CSYSV_ROOK)\n#define LAPACK_zsysv_rook LAPACK_GLOBAL(zsysv_rook,ZSYSV_ROOK)\n#define 
LAPACK_csyr LAPACK_GLOBAL(csyr,CSYR)\n#define LAPACK_zsyr LAPACK_GLOBAL(zsyr,ZSYR)\n#define LAPACK_ilaver LAPACK_GLOBAL(ilaver,ILAVER)\n\nvoid LAPACK_sgetrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgetrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgetrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zgetrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_sgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, float* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, double* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_sgttrf( lapack_int* n, float* dl, float* d, float* du, float* du2,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgttrf( lapack_int* n, double* dl, double* d, double* du,\n                    double* du2, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgttrf( lapack_int* n, lapack_complex_float* dl,\n                    lapack_complex_float* d, lapack_complex_float* du,\n                    lapack_complex_float* du2, lapack_int* 
ipiv,\n                    lapack_int *info );\nvoid LAPACK_zgttrf( lapack_int* n, lapack_complex_double* dl,\n                    lapack_complex_double* d, lapack_complex_double* du,\n                    lapack_complex_double* du2, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_spotrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dpotrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_cpotrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_zpotrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dpstrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* piv, lapack_int* rank, double* tol,\n                    double* work, lapack_int *info );\nvoid LAPACK_spstrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* piv, lapack_int* rank, float* tol, float* work,\n                    lapack_int *info );\nvoid LAPACK_zpstrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* piv, lapack_int* rank,\n                    double* tol, double* work, lapack_int *info );\nvoid LAPACK_cpstrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* piv, lapack_int* rank,\n                    float* tol, float* work, lapack_int *info );\nvoid LAPACK_dpftrf( char* transr, char* uplo, lapack_int* n, double* a,\n                    lapack_int *info );\nvoid LAPACK_spftrf( char* transr, char* uplo, lapack_int* n, float* a,\n                    lapack_int *info );\nvoid LAPACK_zpftrf( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int *info );\nvoid LAPACK_cpftrf( char* 
transr, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int *info );\nvoid LAPACK_spptrf( char* uplo, lapack_int* n, float* ap, lapack_int *info );\nvoid LAPACK_dpptrf( char* uplo, lapack_int* n, double* ap, lapack_int *info );\nvoid LAPACK_cpptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_zpptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_spbtrf( char* uplo, lapack_int* n, lapack_int* kd, float* ab,\n                    lapack_int* ldab, lapack_int *info );\nvoid LAPACK_dpbtrf( char* uplo, lapack_int* n, lapack_int* kd, double* ab,\n                    lapack_int* ldab, lapack_int *info );\nvoid LAPACK_cpbtrf( char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_int *info );\nvoid LAPACK_zpbtrf( char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_int *info );\nvoid LAPACK_spttrf( lapack_int* n, float* d, float* e, lapack_int *info );\nvoid LAPACK_dpttrf( lapack_int* n, double* d, double* e, lapack_int *info );\nvoid LAPACK_cpttrf( lapack_int* n, float* d, lapack_complex_float* e,\n                    lapack_int *info );\nvoid LAPACK_zpttrf( lapack_int* n, double* d, lapack_complex_double* e,\n                    lapack_int *info );\nvoid LAPACK_ssytrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ipiv, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dsytrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ipiv, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_csytrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv,\n             
       lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zsytrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_chetrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zhetrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ssptrf( char* uplo, lapack_int* n, float* ap, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_dsptrf( char* uplo, lapack_int* n, double* ap, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_csptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zsptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_chptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zhptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_sgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* a, lapack_int* lda, const lapack_int* ipiv,\n                    float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const lapack_int* ipiv,\n                    double* b, lapack_int* ldb, 
lapack_int *info );\nvoid LAPACK_cgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const float* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const double* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_cgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_float* ab,\n                    lapack_int* ldab, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_double* ab,\n                    lapack_int* ldab, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_sgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* dl, const float* d, const float* du,\n                    const float* du2, const lapack_int* ipiv, float* b,\n                    lapack_int* ldb, lapack_int *info 
);\nvoid LAPACK_dgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* dl, const double* d, const double* du,\n                    const double* du2, const lapack_int* ipiv, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du,\n                    const lapack_complex_float* du2, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du,\n                    const lapack_complex_double* du2, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_spotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpftrs( char* transr, char* 
uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_spftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* a, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_spptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_cpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_spbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const float* ab, lapack_int* ldab, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const double* ab, lapack_int* ldab, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cpbtrs( char* uplo, lapack_int* n, lapack_int* kd, 
lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_spttrs( lapack_int* n, lapack_int* nrhs, const float* d,\n                    const float* e, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpttrs( lapack_int* n, lapack_int* nrhs, const double* d,\n                    const double* e, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_cpttrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const lapack_complex_float* e, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zpttrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const lapack_complex_double* e,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ssytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, const lapack_int* ipiv, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const lapack_int* ipiv,\n                    double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_csytrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zsytrs( char* uplo, lapack_int* n, 
lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_chetrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zhetrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_ssptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, const lapack_int* ipiv, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, const lapack_int* ipiv, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_csptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_chptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zhptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* 
ap, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_strtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dtrtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* a, lapack_int* lda,\n                    double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_ctrtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ztrtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_stptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* ap, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dtptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* ap, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_ctptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* ap,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ztptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* ap,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid 
LAPACK_stbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const float* ab,\n                    lapack_int* ldab, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dtbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const double* ab,\n                    lapack_int* ldab, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ctbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ztbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_sgecon( char* norm, lapack_int* n, const float* a, lapack_int* lda,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgecon( char* norm, lapack_int* n, const double* a, lapack_int* lda,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgecon( char* norm, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* anorm, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgecon( char* norm, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, double* anorm, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                   
 lapack_int *info );\nvoid LAPACK_sgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const float* ab, lapack_int* ldab, const lapack_int* ipiv,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const double* ab, lapack_int* ldab, const lapack_int* ipiv,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgtcon( char* norm, lapack_int* n, const float* dl, const float* d,\n                    const float* du, const float* du2, const lapack_int* ipiv,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgtcon( char* norm, lapack_int* n, const double* dl,\n                    const double* d, const double* du, const double* du2,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgtcon( char* norm, lapack_int* n, const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const 
lapack_complex_float* du,\n                    const lapack_complex_float* du2, const lapack_int* ipiv,\n                    float* anorm, float* rcond, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zgtcon( char* norm, lapack_int* n, const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du,\n                    const lapack_complex_double* du2, const lapack_int* ipiv,\n                    double* anorm, double* rcond, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_spocon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dpocon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cpocon( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* anorm, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zpocon( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, double* anorm, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sppcon( char* uplo, lapack_int* n, const float* ap, float* anorm,\n                    float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dppcon( char* uplo, lapack_int* n, const double* ap, double* anorm,\n                    double* rcond, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cppcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                  
  float* anorm, float* rcond, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zppcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    double* anorm, double* rcond, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_spbcon( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,\n                    lapack_int* ldab, float* anorm, float* rcond, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dpbcon( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,\n                    lapack_int* ldab, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cpbcon( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    float* anorm, float* rcond, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zpbcon( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    double* anorm, double* rcond, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sptcon( lapack_int* n, const float* d, const float* e, float* anorm,\n                    float* rcond, float* work, lapack_int *info );\nvoid LAPACK_dptcon( lapack_int* n, const double* d, const double* e,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int *info );\nvoid LAPACK_cptcon( lapack_int* n, const float* d,\n                    const lapack_complex_float* e, float* anorm, float* rcond,\n                    float* work, lapack_int *info );\nvoid LAPACK_zptcon( lapack_int* n, const double* d,\n                    const lapack_complex_double* e, double* anorm,\n                    double* rcond, double* work, 
lapack_int *info );\nvoid LAPACK_ssycon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dsycon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_csycon( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv, float* anorm,\n                    float* rcond, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zsycon( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv, double* anorm,\n                    double* rcond, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_checon( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv, float* anorm,\n                    float* rcond, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zhecon( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv, double* anorm,\n                    double* rcond, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_sspcon( char* uplo, lapack_int* n, const float* ap,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dspcon( char* uplo, lapack_int* n, const double* ap,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cspcon( char* uplo, lapack_int* n, 
const lapack_complex_float* ap,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zspcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_chpcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zhpcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_strcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const float* a, lapack_int* lda, float* rcond, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtrcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const double* a, lapack_int* lda, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctrcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    float* rcond, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztrcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    double* rcond, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const float* ap, float* rcond, float* work,\n                    lapack_int* iwork, lapack_int *info 
);\nvoid LAPACK_dtpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const double* ap, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_float* ap, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_double* ap, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const float* ab, lapack_int* ldab,\n                    float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dtbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const double* ab, lapack_int* ldab,\n                    double* rcond, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const lapack_complex_float* ab,\n                    lapack_int* ldab, float* rcond, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_ztbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const lapack_complex_double* ab,\n                    lapack_int* ldab, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* a, lapack_int* lda, const float* af,\n                    lapack_int* ldaf, const lapack_int* ipiv, const float* b,\n   
                 lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,\n                    float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const double* af,\n                    lapack_int* ldaf, const lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const double* a, lapack_int* lda, const double* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const double* r,\n                     const double* c, const double* b, lapack_int* ldb,\n                     double* x, lapack_int* ldx, double* rcond, double* berr,\n                     
lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const float* a, lapack_int* lda, const float* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const float* r,\n                     const float* c, const float* b, lapack_int* ldb, float* x,\n                     lapack_int* ldx, float* rcond, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const double* r, const double* c,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const float* r, const float* c,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* 
rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const float* ab, lapack_int* ldab,\n                    const float* afb, lapack_int* ldafb, const lapack_int* ipiv,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const double* ab, lapack_int* ldab,\n                    const double* afb, lapack_int* ldafb,\n                    const lapack_int* ipiv, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* ferr, double* berr,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_float* ab,\n                    lapack_int* ldab, const lapack_complex_float* afb,\n                    lapack_int* ldafb, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_double* ab,\n                    lapack_int* ldab, const lapack_complex_double* afb,\n                    lapack_int* ldafb, const lapack_int* ipiv,\n  
                  const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_dgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, const double* ab,\n                     lapack_int* ldab, const double* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const double* r, const double* c,\n                     const double* b, lapack_int* ldb, double* x,\n                     lapack_int* ldx, double* rcond, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, const float* ab,\n                     lapack_int* ldab, const float* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const float* r, const float* c,\n                     const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params, float* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs,\n                     const lapack_complex_double* ab, lapack_int* ldab,\n                     const lapack_complex_double* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const double* r, const double* c,\n                     const 
lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs,\n                     const lapack_complex_float* ab, lapack_int* ldab,\n                     const lapack_complex_float* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const float* r, const float* c,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* dl, const float* d, const float* du,\n                    const float* dlf, const float* df, const float* duf,\n                    const float* du2, const lapack_int* ipiv, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,\n                    float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* dl, const double* d, const double* du,\n                    const double* dlf, const double* df, const double* duf,\n                    const double* du2, const lapack_int* ipiv, const double* b,\n                    lapack_int* 
ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du,\n                    const lapack_complex_float* dlf,\n                    const lapack_complex_float* df,\n                    const lapack_complex_float* duf,\n                    const lapack_complex_float* du2, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du,\n                    const lapack_complex_double* dlf,\n                    const lapack_complex_double* df,\n                    const lapack_complex_double* duf,\n                    const lapack_complex_double* du2, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, const float* af, lapack_int* ldaf,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dporfs( 
char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const double* af,\n                    lapack_int* ldaf, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* ferr, double* berr,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cporfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zporfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_dporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const double* a, lapack_int* lda, const double* af,\n                     lapack_int* ldaf, const double* s, const double* b,\n                     lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params, double* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const float* a, lapack_int* lda, const 
float* af,\n                     lapack_int* ldaf, const float* s, const float* b,\n                     lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const double* s, const lapack_complex_double* b,\n                     lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                     double* rcond, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const float* s, const lapack_complex_float* b,\n                     lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                     float* rcond, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_spprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, const float* afp, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,\n                    
float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, const double* afp, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap,\n                    const lapack_complex_float* afp,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap,\n                    const lapack_complex_double* afp,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_spbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const float* ab, lapack_int* ldab, const float* afb,\n                    lapack_int* ldafb, const float* b, lapack_int* ldb,\n                    float* x, lapack_int* ldx, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const double* ab, lapack_int* ldab, const double* afb,\n                    lapack_int* ldafb, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* ferr, double* berr,\n                    
double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_complex_float* afb, lapack_int* ldafb,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_complex_double* afb, lapack_int* ldafb,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sptrfs( lapack_int* n, lapack_int* nrhs, const float* d,\n                    const float* e, const float* df, const float* ef,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int *info );\nvoid LAPACK_dptrfs( lapack_int* n, lapack_int* nrhs, const double* d,\n                    const double* e, const double* df, const double* ef,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* ferr, double* berr, double* work,\n                    lapack_int *info );\nvoid LAPACK_cptrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const lapack_complex_float* e, const float* df,\n                    const lapack_complex_float* ef,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, 
lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zptrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const lapack_complex_double* e,\n                    const double* df, const lapack_complex_double* ef,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_ssyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, const float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const float* b, lapack_int* ldb,\n                    float* x, lapack_int* ldx, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const double* af,\n                    lapack_int* ldaf, const lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_csyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const 
lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const double* a, lapack_int* lda, const double* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const double* s,\n                     const double* b, lapack_int* ldb, double* x,\n                     lapack_int* ldx, double* rcond, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const float* a, lapack_int* lda, const float* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const float* s,\n                     const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params, float* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const double* s,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, 
lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_csyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const float* s,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_cherfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zherfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, 
lapack_int *info );\nvoid LAPACK_zherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const double* s,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const float* s,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_ssprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, const float* afp, const lapack_int* ipiv,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dsprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, const double* afp, const lapack_int* ipiv,\n                    const double* b, lapack_int* 
ldb, double* x,\n                    lapack_int* ldx, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_csprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap,\n                    const lapack_complex_float* afp, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zsprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap,\n                    const lapack_complex_double* afp, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_chprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap,\n                    const lapack_complex_float* afp, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap,\n                    const lapack_complex_double* afp, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int 
*info );\nvoid LAPACK_strrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* a, lapack_int* lda,\n                    const float* b, lapack_int* ldb, const float* x,\n                    lapack_int* ldx, float* ferr, float* berr, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtrrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* a, lapack_int* lda,\n                    const double* b, lapack_int* ldb, const double* x,\n                    lapack_int* ldx, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctrrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* b,\n                    lapack_int* ldb, const lapack_complex_float* x,\n                    lapack_int* ldx, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztrrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* b,\n                    lapack_int* ldb, const lapack_complex_double* x,\n                    lapack_int* ldx, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stprfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* ap, const float* b,\n                    lapack_int* ldb, const float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dtprfs( char* uplo, char* 
trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* ap, const double* b,\n                    lapack_int* ldb, const double* x, lapack_int* ldx,\n                    double* ferr, double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctprfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* ap,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    const lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztprfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* ap,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    const lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_stbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const float* ab,\n                    lapack_int* ldab, const float* b, lapack_int* ldb,\n                    const float* x, lapack_int* ldx, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const double* ab,\n                    lapack_int* ldab, const double* b, lapack_int* ldb,\n                    const double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    
lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    const lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    const lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgetri( lapack_int* n, float* a, lapack_int* lda,\n                    const lapack_int* ipiv, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgetri( lapack_int* n, double* a, lapack_int* lda,\n                    const lapack_int* ipiv, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgetri( lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zgetri( lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_spotri( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dpotri( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_cpotri( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info 
);\nvoid LAPACK_zpotri( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dpftri( char* transr, char* uplo, lapack_int* n, double* a,\n                    lapack_int *info );\nvoid LAPACK_spftri( char* transr, char* uplo, lapack_int* n, float* a,\n                    lapack_int *info );\nvoid LAPACK_zpftri( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int *info );\nvoid LAPACK_cpftri( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int *info );\nvoid LAPACK_spptri( char* uplo, lapack_int* n, float* ap, lapack_int *info );\nvoid LAPACK_dpptri( char* uplo, lapack_int* n, double* ap, lapack_int *info );\nvoid LAPACK_cpptri( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_zpptri( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_ssytri( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    const lapack_int* ipiv, float* work, lapack_int *info );\nvoid LAPACK_dsytri( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    const lapack_int* ipiv, double* work, lapack_int *info );\nvoid LAPACK_csytri( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zsytri( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_chetri( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zhetri( char* uplo, lapack_int* n, 
lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_ssptri( char* uplo, lapack_int* n, float* ap,\n                    const lapack_int* ipiv, float* work, lapack_int *info );\nvoid LAPACK_dsptri( char* uplo, lapack_int* n, double* ap,\n                    const lapack_int* ipiv, double* work, lapack_int *info );\nvoid LAPACK_csptri( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    const lapack_int* ipiv, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zsptri( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    const lapack_int* ipiv, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_chptri( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    const lapack_int* ipiv, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zhptri( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    const lapack_int* ipiv, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_strtri( char* uplo, char* diag, lapack_int* n, float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dtrtri( char* uplo, char* diag, lapack_int* n, double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_ctrtri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_ztrtri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dtftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    double* a, lapack_int *info );\nvoid LAPACK_stftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    
float* a, lapack_int *info );\nvoid LAPACK_ztftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_double* a, lapack_int *info );\nvoid LAPACK_ctftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_float* a, lapack_int *info );\nvoid LAPACK_stptri( char* uplo, char* diag, lapack_int* n, float* ap,\n                    lapack_int *info );\nvoid LAPACK_dtptri( char* uplo, char* diag, lapack_int* n, double* ap,\n                    lapack_int *info );\nvoid LAPACK_ctptri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_float* ap, lapack_int *info );\nvoid LAPACK_ztptri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_double* ap, lapack_int *info );\nvoid LAPACK_sgeequ( lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, float* r, float* c, float* rowcnd,\n                    float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_dgeequ( lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, double* r, double* c, double* rowcnd,\n                    double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_cgeequ( lapack_int* m, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* r, float* c, float* rowcnd,\n                    float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_zgeequ( lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* r,\n                    double* c, double* rowcnd, double* colcnd, double* amax,\n                    lapack_int *info );\nvoid LAPACK_dgeequb( lapack_int* m, lapack_int* n, const double* a,\n                     lapack_int* lda, double* r, double* c, double* rowcnd,\n                     double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_sgeequb( lapack_int* m, lapack_int* n, const float* a,\n                     
lapack_int* lda, float* r, float* c, float* rowcnd,\n                     float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_zgeequb( lapack_int* m, lapack_int* n,\n                     const lapack_complex_double* a, lapack_int* lda, double* r,\n                     double* c, double* rowcnd, double* colcnd, double* amax,\n                     lapack_int *info );\nvoid LAPACK_cgeequb( lapack_int* m, lapack_int* n,\n                     const lapack_complex_float* a, lapack_int* lda, float* r,\n                     float* c, float* rowcnd, float* colcnd, float* amax,\n                     lapack_int *info );\nvoid LAPACK_sgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const float* ab, lapack_int* ldab, float* r,\n                    float* c, float* rowcnd, float* colcnd, float* amax,\n                    lapack_int *info );\nvoid LAPACK_dgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const double* ab, lapack_int* ldab,\n                    double* r, double* c, double* rowcnd, double* colcnd,\n                    double* amax, lapack_int *info );\nvoid LAPACK_cgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const lapack_complex_float* ab,\n                    lapack_int* ldab, float* r, float* c, float* rowcnd,\n                    float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_zgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const lapack_complex_double* ab,\n                    lapack_int* ldab, double* r, double* c, double* rowcnd,\n                    double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_dgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const double* ab, lapack_int* ldab,\n                     double* r, double* c, double* rowcnd, double* colcnd,\n                     double* amax, lapack_int *info 
);\nvoid LAPACK_sgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const float* ab, lapack_int* ldab,\n                     float* r, float* c, float* rowcnd, float* colcnd,\n                     float* amax, lapack_int *info );\nvoid LAPACK_zgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const lapack_complex_double* ab,\n                     lapack_int* ldab, double* r, double* c, double* rowcnd,\n                     double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_cgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const lapack_complex_float* ab,\n                     lapack_int* ldab, float* r, float* c, float* rowcnd,\n                     float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_spoequ( lapack_int* n, const float* a, lapack_int* lda, float* s,\n                    float* scond, float* amax, lapack_int *info );\nvoid LAPACK_dpoequ( lapack_int* n, const double* a, lapack_int* lda, double* s,\n                    double* scond, double* amax, lapack_int *info );\nvoid LAPACK_cpoequ( lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* s, float* scond, float* amax,\n                    lapack_int *info );\nvoid LAPACK_zpoequ( lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, double* s, double* scond, double* amax,\n                    lapack_int *info );\nvoid LAPACK_dpoequb( lapack_int* n, const double* a, lapack_int* lda, double* s,\n                     double* scond, double* amax, lapack_int *info );\nvoid LAPACK_spoequb( lapack_int* n, const float* a, lapack_int* lda, float* s,\n                     float* scond, float* amax, lapack_int *info );\nvoid LAPACK_zpoequb( lapack_int* n, const lapack_complex_double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     lapack_int *info 
);\nvoid LAPACK_cpoequb( lapack_int* n, const lapack_complex_float* a,\n                     lapack_int* lda, float* s, float* scond, float* amax,\n                     lapack_int *info );\nvoid LAPACK_sppequ( char* uplo, lapack_int* n, const float* ap, float* s,\n                    float* scond, float* amax, lapack_int *info );\nvoid LAPACK_dppequ( char* uplo, lapack_int* n, const double* ap, double* s,\n                    double* scond, double* amax, lapack_int *info );\nvoid LAPACK_cppequ( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    float* s, float* scond, float* amax, lapack_int *info );\nvoid LAPACK_zppequ( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    double* s, double* scond, double* amax, lapack_int *info );\nvoid LAPACK_spbequ( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,\n                    lapack_int* ldab, float* s, float* scond, float* amax,\n                    lapack_int *info );\nvoid LAPACK_dpbequ( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,\n                    lapack_int* ldab, double* s, double* scond, double* amax,\n                    lapack_int *info );\nvoid LAPACK_cpbequ( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_float* ab, lapack_int* ldab, float* s,\n                    float* scond, float* amax, lapack_int *info );\nvoid LAPACK_zpbequ( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    double* s, double* scond, double* amax, lapack_int *info );\nvoid LAPACK_dsyequb( char* uplo, lapack_int* n, const double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     double* work, lapack_int *info );\nvoid LAPACK_ssyequb( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                     float* s, float* scond, float* amax, float* work,\n               
      lapack_int *info );\nvoid LAPACK_zsyequb( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_csyequb( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                     lapack_int* lda, float* s, float* scond, float* amax,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zheequb( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_cheequb( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                     lapack_int* lda, float* s, float* scond, float* amax,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_sgesv( lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda,\n                   lapack_int* ipiv, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda,\n                   lapack_int* ipiv, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* a,\n                   lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a,\n                   lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dsgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda,\n                    lapack_int* ipiv, double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* work, float* swork,\n                    
lapack_int* iter, lapack_int *info );\nvoid LAPACK_zcgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    lapack_complex_double* work, lapack_complex_float* swork,\n                    double* rwork, lapack_int* iter, lapack_int *info );\nvoid LAPACK_sgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, float* r, float* c, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, double* r, double* c,\n                    double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, float* r, float* c,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, 
lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, double* r, double* c,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                     double* rcond, double* rpvgrw, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* r, float* c,\n                     float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* rpvgrw, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     lapack_complex_double* b, lapack_int* ldb,\n              
       lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* r, float* c,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, float* ab, lapack_int* ldab,\n                   lapack_int* ipiv, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, double* ab, lapack_int* ldab,\n                   lapack_int* ipiv, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab,\n                   lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_zgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, lapack_complex_double* ab,\n             
      lapack_int* ldab, lapack_int* ipiv, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, float* ab,\n                    lapack_int* ldab, float* afb, lapack_int* ldafb,\n                    lapack_int* ipiv, char* equed, float* r, float* c, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, double* ab,\n                    lapack_int* ldab, double* afb, lapack_int* ldafb,\n                    lapack_int* ipiv, char* equed, double* r, double* c,\n                    double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab,\n                    lapack_int* ldab, lapack_complex_float* afb,\n                    lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r,\n                    float* c, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab,\n                    lapack_int* ldab, lapack_complex_double* afb,\n                    lapack_int* ldafb, lapack_int* ipiv, char* 
equed, double* r,\n                    double* c, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, double* ab,\n                     lapack_int* ldab, double* afb, lapack_int* ldafb,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                     double* rcond, double* rpvgrw, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, float* ab,\n                     lapack_int* ldab, float* afb, lapack_int* ldafb,\n                     lapack_int* ipiv, char* equed, float* r, float* c,\n                     float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* rpvgrw, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs,\n                     lapack_complex_double* ab, lapack_int* ldab,\n                     lapack_complex_double* afb, lapack_int* ldafb,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     
lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab,\n                     lapack_int* ldab, lapack_complex_float* afb,\n                     lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r,\n                     float* c, lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgtsv( lapack_int* n, lapack_int* nrhs, float* dl, float* d,\n                   float* du, float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dgtsv( lapack_int* n, lapack_int* nrhs, double* dl, double* d,\n                   double* du, double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* dl,\n                   lapack_complex_float* d, lapack_complex_float* du,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* dl,\n                   lapack_complex_double* d, lapack_complex_double* du,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info 
);\nvoid LAPACK_sgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* dl, const float* d, const float* du,\n                    float* dlf, float* df, float* duf, float* du2,\n                    lapack_int* ipiv, const float* b, lapack_int* ldb, float* x,\n                    lapack_int* ldx, float* rcond, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* dl, const double* d, const double* du,\n                    double* dlf, double* df, double* duf, double* du2,\n                    lapack_int* ipiv, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* rcond, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du, lapack_complex_float* dlf,\n                    lapack_complex_float* df, lapack_complex_float* duf,\n                    lapack_complex_float* du2, lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du, lapack_complex_double* dlf,\n                    lapack_complex_double* df, lapack_complex_double* duf,\n                    
lapack_complex_double* du2, lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sposv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,\n                   lapack_int* lda, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                   lapack_int* lda, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cposv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zposv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dsposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* work, float* swork,\n                    lapack_int* iter, lapack_int *info );\nvoid LAPACK_zcposv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx,\n                    lapack_complex_double* work, lapack_complex_float* swork,\n                    double* rwork, lapack_int* iter, lapack_int *info );\nvoid LAPACK_sposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                    
char* equed, float* s, float* b, lapack_int* ldb, float* x,\n                    lapack_int* ldx, float* rcond, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                    char* equed, double* s, double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* rcond, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf, char* equed,\n                    float* s, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf, char* equed,\n                    double* s, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                     char* equed, double* s, double* b, lapack_int* ldb,\n                     double* x, lapack_int* ldx, double* rcond, double* rpvgrw,\n                     double* berr, lapack_int* 
n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params, double* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                     char* equed, float* s, float* b, lapack_int* ldb, float* x,\n                     lapack_int* ldx, float* rcond, float* rpvgrw, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf, char* equed,\n                     double* s, lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf, char* equed,\n                     float* s, lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* 
nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sppsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap,\n                   float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dppsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap,\n                   double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cppsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* ap, lapack_complex_float* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zppsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* ap, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    float* ap, float* afp, char* equed, float* s, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    double* ap, double* afp, char* equed, double* s, double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* ap, lapack_complex_float* afp,\n                    char* equed, float* s, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid 
LAPACK_zppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* ap, lapack_complex_double* afp,\n                    char* equed, double* s, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_spbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   float* ab, lapack_int* ldab, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   double* ab, lapack_int* ldab, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   lapack_complex_float* ab, lapack_int* ldab,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   lapack_complex_double* ab, lapack_int* ldab,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_spbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb,\n                    lapack_int* ldafb, char* equed, float* s, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb,\n                    lapack_int* ldafb, char* equed, double* s, double* 
b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, lapack_complex_float* ab,\n                    lapack_int* ldab, lapack_complex_float* afb,\n                    lapack_int* ldafb, char* equed, float* s,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, lapack_complex_double* ab,\n                    lapack_int* ldab, lapack_complex_double* afb,\n                    lapack_int* ldafb, char* equed, double* s,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sptsv( lapack_int* n, lapack_int* nrhs, float* d, float* e,\n                   float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dptsv( lapack_int* n, lapack_int* nrhs, double* d, double* e,\n                   double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cptsv( lapack_int* n, lapack_int* nrhs, float* d,\n                   lapack_complex_float* e, lapack_complex_float* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zptsv( lapack_int* n, lapack_int* nrhs, double* d,\n                   lapack_complex_double* e, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sptsvx( char* fact, 
lapack_int* n, lapack_int* nrhs, const float* d,\n                    const float* e, float* df, float* ef, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int *info );\nvoid LAPACK_dptsvx( char* fact, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const double* e, double* df, double* ef,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* rcond, double* ferr, double* berr,\n                    double* work, lapack_int *info );\nvoid LAPACK_cptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const lapack_complex_float* e, float* df,\n                    lapack_complex_float* ef, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zptsvx( char* fact, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const lapack_complex_double* e, double* df,\n                    lapack_complex_double* ef, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_ssysv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,\n                   lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb,\n                   float* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dsysv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                   lapack_int* lda, lapack_int* ipiv, double* b,\n                   lapack_int* ldb, double* work, 
lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_csysv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_zsysv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_ssysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* a, lapack_int* lda, float* af,\n                    lapack_int* ldaf, lapack_int* ipiv, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, double* af,\n                    lapack_int* ldaf, lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_csysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n          
          lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* s, double* b,\n                     lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params, double* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* s, float* b,\n                     lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params, float* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n            
         lapack_complex_double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* s,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_csysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* s,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_chesv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_zhesv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_chesvx( char* fact, char* uplo, lapack_int* n, lapack_int* 
nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_zhesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* s,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_chesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf,\n                     
lapack_int* ipiv, char* equed, float* s,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sspsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap,\n                   lapack_int* ipiv, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dspsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap,\n                   lapack_int* ipiv, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cspsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* ap, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zspsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* ap, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_sspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, float* afp, lapack_int* ipiv,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, double* afp, lapack_int* ipiv,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* rcond, double* ferr, double* berr,\n                    double* work, lapack_int* 
iwork, lapack_int *info );\nvoid LAPACK_cspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, lapack_complex_float* afp,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, lapack_complex_double* afp,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_chpsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* ap, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zhpsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* ap, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_chpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, lapack_complex_float* afp,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    
const lapack_complex_double* ap, lapack_complex_double* afp,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgeqrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgeqrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgeqrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgeqrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgeqpf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* jpvt, float* tau, float* work,\n                    lapack_int *info );\nvoid LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* jpvt, double* tau, double* work,\n                    lapack_int *info );\nvoid LAPACK_cgeqpf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* 
lda, lapack_int* jpvt,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgeqp3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* jpvt, float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgeqp3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* jpvt, double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgeqp3( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int *info );\nvoid LAPACK_zgeqp3( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int *info );\nvoid LAPACK_sorgqr( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgqr( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    
const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungqr( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungqr( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgelqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgelqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgelqf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    
lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgelqf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sorglq( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorglq( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cunglq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zunglq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    
lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgeqlf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgeqlf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgeqlf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgeqlf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sorgql( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgql( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungql( lapack_int* 
m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungql( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cunmql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgerqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* 
work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgerqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgerqf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgerqf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sorgrq( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgrq( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungrq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungrq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, 
lapack_int *info );\nvoid LAPACK_dormrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cunmrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_stzrzf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dtzrzf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ctzrzf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ztzrzf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormrz( char* side, char* 
trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l, const float* a,\n                    lapack_int* lda, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dormrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l, const double* a,\n                    lapack_int* lda, const double* tau, double* c,\n                    lapack_int* ldc, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sggqrf( lapack_int* n, lapack_int* m, lapack_int* p, float* a,\n                    lapack_int* lda, float* taua, float* b, lapack_int* ldb,\n                    float* taub, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dggqrf( lapack_int* n, lapack_int* m, lapack_int* p, double* a,\n                    lapack_int* lda, double* taua, double* b, lapack_int* ldb,\n                    double* taub, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cggqrf( lapack_int* n, lapack_int* m, 
lapack_int* p,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* taua, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* taub,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zggqrf( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* taua, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* taub,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sggrqf( lapack_int* m, lapack_int* p, lapack_int* n, float* a,\n                    lapack_int* lda, float* taua, float* b, lapack_int* ldb,\n                    float* taub, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dggrqf( lapack_int* m, lapack_int* p, lapack_int* n, double* a,\n                    lapack_int* lda, double* taua, double* b, lapack_int* ldb,\n                    double* taub, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cggrqf( lapack_int* m, lapack_int* p, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* taua, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* taub,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zggrqf( lapack_int* m, lapack_int* p, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* taua, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* taub,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info 
);\nvoid LAPACK_sgebrd( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* d, float* e, float* tauq, float* taup, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgebrd( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* d, double* e, double* tauq, double* taup,\n                    double* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgebrd( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, float* d, float* e,\n                    lapack_complex_float* tauq, lapack_complex_float* taup,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgebrd( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, double* d, double* e,\n                    lapack_complex_double* tauq, lapack_complex_double* taup,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, float* ab, lapack_int* ldab,\n                    float* d, float* e, float* q, lapack_int* ldq, float* pt,\n                    lapack_int* ldpt, float* c, lapack_int* ldc, float* work,\n                    lapack_int *info );\nvoid LAPACK_dgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, double* ab,\n                    lapack_int* ldab, double* d, double* e, double* q,\n                    lapack_int* ldq, double* pt, lapack_int* ldpt, double* c,\n                    lapack_int* ldc, double* work, lapack_int *info );\nvoid LAPACK_cgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, lapack_complex_float* ab,\n                    
lapack_int* ldab, float* d, float* e,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* pt, lapack_int* ldpt,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, lapack_complex_double* ab,\n                    lapack_int* ldab, double* d, double* e,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* pt, lapack_int* ldpt,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    float* a, lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    double* a, lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k, const float* a,\n                    lapack_int* lda, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dormbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k, const double* a,\n                    lapack_int* lda, const double* tau, double* c,\n                    lapack_int* ldc, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cungbr( char* vect, lapack_int* m, lapack_int* n, 
lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, float* d, float* e,\n                    float* vt, lapack_int* ldvt, float* u, lapack_int* ldu,\n                    float* c, lapack_int* ldc, float* work, lapack_int *info );\nvoid LAPACK_dbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, double* d, double* e,\n                    double* vt, lapack_int* ldvt, double* u, lapack_int* ldu,\n                    double* c, lapack_int* ldc, double* work,\n                    lapack_int *info );\nvoid LAPACK_cbdsqr( char* uplo, lapack_int* n, 
lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, float* d, float* e,\n                    lapack_complex_float* vt, lapack_int* ldvt,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* c, lapack_int* ldc, float* work,\n                    lapack_int *info );\nvoid LAPACK_zbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, double* d, double* e,\n                    lapack_complex_double* vt, lapack_int* ldvt,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* c, lapack_int* ldc, double* work,\n                    lapack_int *info );\nvoid LAPACK_sbdsdc( char* uplo, char* compq, lapack_int* n, float* d, float* e,\n                    float* u, lapack_int* ldu, float* vt, lapack_int* ldvt,\n                    float* q, lapack_int* iq, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dbdsdc( char* uplo, char* compq, lapack_int* n, double* d,\n                    double* e, double* u, lapack_int* ldu, double* vt,\n                    lapack_int* ldvt, double* q, lapack_int* iq, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssytrd( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    float* d, float* e, float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dsytrd( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    double* d, double* e, double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sorgtr( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    const float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dorgtr( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    const double* 
tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_chetrd( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, float* d, float* e,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zhetrd( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, double* d, double* e,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungtr( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zungtr( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    
lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ssptrd( char* uplo, lapack_int* n, float* ap, float* d, float* e,\n                    float* tau, lapack_int *info );\nvoid LAPACK_dsptrd( char* uplo, lapack_int* n, double* ap, double* d, double* e,\n                    double* tau, lapack_int *info );\nvoid LAPACK_sopgtr( char* uplo, lapack_int* n, const float* ap,\n                    const float* tau, float* q, lapack_int* ldq, float* work,\n                    lapack_int *info );\nvoid LAPACK_dopgtr( char* uplo, lapack_int* n, const double* ap,\n                    const double* tau, double* q, lapack_int* ldq, double* work,\n                    lapack_int *info );\nvoid LAPACK_sopmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const float* ap, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int *info );\nvoid LAPACK_dopmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const double* ap, const double* tau,\n                    double* c, lapack_int* ldc, double* work,\n                    lapack_int *info );\nvoid LAPACK_chptrd( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    float* d, float* e, lapack_complex_float* tau,\n                    lapack_int *info );\nvoid LAPACK_zhptrd( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    double* d, double* e, lapack_complex_double* tau,\n                    lapack_int *info );\nvoid LAPACK_cupgtr( char* uplo, 
lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_complex_float* tau, lapack_complex_float* q,\n                    lapack_int* ldq, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zupgtr( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_complex_double* tau, lapack_complex_double* q,\n                    lapack_int* ldq, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_cupmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_complex_float* tau, lapack_complex_float* c,\n                    lapack_int* ldc, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zupmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_ssbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,\n                    float* ab, lapack_int* ldab, float* d, float* e, float* q,\n                    lapack_int* ldq, float* work, lapack_int *info );\nvoid LAPACK_dsbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,\n                    double* ab, lapack_int* ldab, double* d, double* e,\n                    double* q, lapack_int* ldq, double* work,\n                    lapack_int *info );\nvoid LAPACK_chbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_float* ab, lapack_int* ldab, float* d,\n                    float* e, lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zhbtrd( char* vect, char* uplo, lapack_int* n, 
lapack_int* kd,\n                    lapack_complex_double* ab, lapack_int* ldab, double* d,\n                    double* e, lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_ssterf( lapack_int* n, float* d, float* e, lapack_int *info );\nvoid LAPACK_dsterf( lapack_int* n, double* d, double* e, lapack_int *info );\nvoid LAPACK_ssteqr( char* compz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dsteqr( char* compz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_csteqr( char* compz, lapack_int* n, float* d, float* e,\n                    lapack_complex_float* z, lapack_int* ldz, float* work,\n                    lapack_int *info );\nvoid LAPACK_zsteqr( char* compz, lapack_int* n, double* d, double* e,\n                    lapack_complex_double* z, lapack_int* ldz, double* work,\n                    lapack_int *info );\nvoid LAPACK_sstemr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    lapack_int* nzc, lapack_int* isuppz, lapack_logical* tryrac,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dstemr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, lapack_int* m, double* w, double* z,\n                    lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz,\n                    lapack_logical* tryrac, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cstemr( char* jobz, 
char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz,\n                    lapack_logical* tryrac, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_zstemr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, lapack_int* m, double* w,\n                    lapack_complex_double* z, lapack_int* ldz, lapack_int* nzc,\n                    lapack_int* isuppz, lapack_logical* tryrac, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_sstedc( char* compz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dstedc( char* compz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cstedc( char* compz, lapack_int* n, float* d, float* e,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zstedc( char* compz, lapack_int* n, double* d, double* e,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n          
          lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sstegr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, lapack_int* isuppz, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_dstegr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, lapack_int* isuppz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cstegr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_int* isuppz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_zstegr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_int* isuppz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_spteqr( char* compz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dpteqr( char* compz, lapack_int* n, double* d, double* e, double* 
z,\n                    lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_cpteqr( char* compz, lapack_int* n, float* d, float* e,\n                    lapack_complex_float* z, lapack_int* ldz, float* work,\n                    lapack_int *info );\nvoid LAPACK_zpteqr( char* compz, lapack_int* n, double* d, double* e,\n                    lapack_complex_double* z, lapack_int* ldz, double* work,\n                    lapack_int *info );\nvoid LAPACK_sstebz( char* range, char* order, lapack_int* n, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    const float* d, const float* e, lapack_int* m,\n                    lapack_int* nsplit, float* w, lapack_int* iblock,\n                    lapack_int* isplit, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dstebz( char* range, char* order, lapack_int* n, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    const double* d, const double* e, lapack_int* m,\n                    lapack_int* nsplit, double* w, lapack_int* iblock,\n                    lapack_int* isplit, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_sstein( lapack_int* n, const float* d, const float* e,\n                    lapack_int* m, const float* w, const lapack_int* iblock,\n                    const lapack_int* isplit, float* z, lapack_int* ldz,\n                    float* work, lapack_int* iwork, lapack_int* ifailv,\n                    lapack_int *info );\nvoid LAPACK_dstein( lapack_int* n, const double* d, const double* e,\n                    lapack_int* m, const double* w, const lapack_int* iblock,\n                    const lapack_int* isplit, double* z, lapack_int* ldz,\n                    double* work, lapack_int* iwork, lapack_int* ifailv,\n                    lapack_int *info );\nvoid LAPACK_cstein( lapack_int* n, const float* d, const 
float* e,\n                    lapack_int* m, const float* w, const lapack_int* iblock,\n                    const lapack_int* isplit, lapack_complex_float* z,\n                    lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifailv, lapack_int *info );\nvoid LAPACK_zstein( lapack_int* n, const double* d, const double* e,\n                    lapack_int* m, const double* w, const lapack_int* iblock,\n                    const lapack_int* isplit, lapack_complex_double* z,\n                    lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifailv, lapack_int *info );\nvoid LAPACK_sdisna( char* job, lapack_int* m, lapack_int* n, const float* d,\n                    float* sep, lapack_int *info );\nvoid LAPACK_ddisna( char* job, lapack_int* m, lapack_int* n, const double* d,\n                    double* sep, lapack_int *info );\nvoid LAPACK_ssygst( lapack_int* itype, char* uplo, lapack_int* n, float* a,\n                    lapack_int* lda, const float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dsygst( lapack_int* itype, char* uplo, lapack_int* n, double* a,\n                    lapack_int* lda, const double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_chegst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zhegst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_sspgst( lapack_int* itype, char* uplo, lapack_int* n, float* ap,\n                    const float* bp, lapack_int *info );\nvoid LAPACK_dspgst( lapack_int* itype, char* uplo, lapack_int* n, double* ap,\n             
       const double* bp, lapack_int *info );\nvoid LAPACK_chpgst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, const lapack_complex_float* bp,\n                    lapack_int *info );\nvoid LAPACK_zhpgst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, const lapack_complex_double* bp,\n                    lapack_int *info );\nvoid LAPACK_ssbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, float* ab, lapack_int* ldab,\n                    const float* bb, lapack_int* ldbb, float* x,\n                    lapack_int* ldx, float* work, lapack_int *info );\nvoid LAPACK_dsbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, double* ab, lapack_int* ldab,\n                    const double* bb, lapack_int* ldbb, double* x,\n                    lapack_int* ldx, double* work, lapack_int *info );\nvoid LAPACK_chbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_complex_float* bb, lapack_int* ldbb,\n                    lapack_complex_float* x, lapack_int* ldx,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_complex_double* bb, lapack_int* ldbb,\n                    lapack_complex_double* x, lapack_int* ldx,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_spbstf( char* uplo, lapack_int* n, lapack_int* kb, float* bb,\n                    lapack_int* ldbb, lapack_int *info );\nvoid LAPACK_dpbstf( char* uplo, lapack_int* n, lapack_int* kb, double* bb,\n           
         lapack_int* ldbb, lapack_int *info );\nvoid LAPACK_cpbstf( char* uplo, lapack_int* n, lapack_int* kb,\n                    lapack_complex_float* bb, lapack_int* ldbb,\n                    lapack_int *info );\nvoid LAPACK_zpbstf( char* uplo, lapack_int* n, lapack_int* kb,\n                    lapack_complex_double* bb, lapack_int* ldbb,\n                    lapack_int *info );\nvoid LAPACK_sgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,\n                    lapack_int* lda, float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,\n                    lapack_int* lda, double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, const float* a,\n                    lapack_int* lda, const float* tau, float* c,\n                    
lapack_int* ldc, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dormhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, const double* a,\n                    lapack_int* lda, const double* tau, double* c,\n                    lapack_int* ldc, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* c,\n                    lapack_int* ldc, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sgebal( char* job, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ilo, lapack_int* ihi, float* scale,\n                    lapack_int *info );\nvoid 
LAPACK_dgebal( char* job, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ilo, lapack_int* ihi, double* scale,\n                    lapack_int *info );\nvoid LAPACK_cgebal( char* job, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ilo, lapack_int* ihi,\n                    float* scale, lapack_int *info );\nvoid LAPACK_zgebal( char* job, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ilo, lapack_int* ihi,\n                    double* scale, lapack_int *info );\nvoid LAPACK_sgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* scale, lapack_int* m,\n                    float* v, lapack_int* ldv, lapack_int *info );\nvoid LAPACK_dgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* scale, lapack_int* m,\n                    double* v, lapack_int* ldv, lapack_int *info );\nvoid LAPACK_cgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* scale, lapack_int* m,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_zgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* scale, lapack_int* m,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_shseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, float* h, lapack_int* ldh, float* wr,\n                    float* wi, float* z, lapack_int* ldz, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, double* h, lapack_int* ldh, double* wr,\n                    double* wi, 
double* z, lapack_int* ldz, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_chseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_float* h, lapack_int* ldh,\n                    lapack_complex_float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_double* h, lapack_int* ldh,\n                    lapack_complex_double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_shsein( char* job, char* eigsrc, char* initv,\n                    lapack_logical* select, lapack_int* n, const float* h,\n                    lapack_int* ldh, float* wr, const float* wi, float* vl,\n                    lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, float* work,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_dhsein( char* job, char* eigsrc, char* initv,\n                    lapack_logical* select, lapack_int* n, const double* h,\n                    lapack_int* ldh, double* wr, const double* wi, double* vl,\n                    lapack_int* ldvl, double* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, double* work,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_chsein( char* job, char* eigsrc, char* initv,\n                    const lapack_logical* select, lapack_int* n,\n                    const lapack_complex_float* h, lapack_int* ldh,\n                    lapack_complex_float* w, lapack_complex_float* vl,\n                    lapack_int* ldvl, lapack_complex_float* 
vr,\n                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_zhsein( char* job, char* eigsrc, char* initv,\n                    const lapack_logical* select, lapack_int* n,\n                    const lapack_complex_double* h, lapack_int* ldh,\n                    lapack_complex_double* w, lapack_complex_double* vl,\n                    lapack_int* ldvl, lapack_complex_double* vr,\n                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_strevc( char* side, char* howmny, lapack_logical* select,\n                    lapack_int* n, const float* t, lapack_int* ldt, float* vl,\n                    lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, float* work,\n                    lapack_int *info );\nvoid LAPACK_dtrevc( char* side, char* howmny, lapack_logical* select,\n                    lapack_int* n, const double* t, lapack_int* ldt, double* vl,\n                    lapack_int* ldvl, double* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, double* work,\n                    lapack_int *info );\nvoid LAPACK_ctrevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* vl, lapack_int* ldvl,\n                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztrevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_double* t, lapack_int* 
ldt,\n                    lapack_complex_double* vl, lapack_int* ldvl,\n                    lapack_complex_double* vr, lapack_int* ldvr, lapack_int* mm,\n                    lapack_int* m, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_strsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const float* t, lapack_int* ldt,\n                    const float* vl, lapack_int* ldvl, const float* vr,\n                    lapack_int* ldvr, float* s, float* sep, lapack_int* mm,\n                    lapack_int* m, float* work, lapack_int* ldwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtrsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const double* t, lapack_int* ldt,\n                    const double* vl, lapack_int* ldvl, const double* vr,\n                    lapack_int* ldvr, double* s, double* sep, lapack_int* mm,\n                    lapack_int* m, double* work, lapack_int* ldwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctrsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_float* t,\n                    lapack_int* ldt, const lapack_complex_float* vl,\n                    lapack_int* ldvl, const lapack_complex_float* vr,\n                    lapack_int* ldvr, float* s, float* sep, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work,\n                    lapack_int* ldwork, float* rwork, lapack_int *info );\nvoid LAPACK_ztrsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_double* t,\n                    lapack_int* ldt, const lapack_complex_double* vl,\n                    lapack_int* ldvl, const lapack_complex_double* vr,\n                    lapack_int* ldvr, double* s, double* sep, lapack_int* mm,\n     
               lapack_int* m, lapack_complex_double* work,\n                    lapack_int* ldwork, double* rwork, lapack_int *info );\nvoid LAPACK_strexc( char* compq, lapack_int* n, float* t, lapack_int* ldt,\n                    float* q, lapack_int* ldq, lapack_int* ifst,\n                    lapack_int* ilst, float* work, lapack_int *info );\nvoid LAPACK_dtrexc( char* compq, lapack_int* n, double* t, lapack_int* ldt,\n                    double* q, lapack_int* ldq, lapack_int* ifst,\n                    lapack_int* ilst, double* work, lapack_int *info );\nvoid LAPACK_ctrexc( char* compq, lapack_int* n, lapack_complex_float* t,\n                    lapack_int* ldt, lapack_complex_float* q, lapack_int* ldq,\n                    lapack_int* ifst, lapack_int* ilst, lapack_int *info );\nvoid LAPACK_ztrexc( char* compq, lapack_int* n, lapack_complex_double* t,\n                    lapack_int* ldt, lapack_complex_double* q, lapack_int* ldq,\n                    lapack_int* ifst, lapack_int* ilst, lapack_int *info );\nvoid LAPACK_strsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, float* t, lapack_int* ldt, float* q,\n                    lapack_int* ldq, float* wr, float* wi, lapack_int* m,\n                    float* s, float* sep, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dtrsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, double* t, lapack_int* ldt, double* q,\n                    lapack_int* ldq, double* wr, double* wi, lapack_int* m,\n                    double* s, double* sep, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ctrsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* q, 
lapack_int* ldq,\n                    lapack_complex_float* w, lapack_int* m, float* s,\n                    float* sep, lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ztrsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* w, lapack_int* m, double* s,\n                    double* sep, lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_strsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const float* a, lapack_int* lda,\n                    const float* b, lapack_int* ldb, float* c, lapack_int* ldc,\n                    float* scale, lapack_int *info );\nvoid LAPACK_dtrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const double* a, lapack_int* lda,\n                    const double* b, lapack_int* ldb, double* c,\n                    lapack_int* ldc, double* scale, lapack_int *info );\nvoid LAPACK_ctrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* c, lapack_int* ldc,\n                    float* scale, lapack_int *info );\nvoid LAPACK_ztrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* c, lapack_int* ldc,\n                    double* scale, lapack_int *info );\nvoid LAPACK_sgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                 
   lapack_int* ihi, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* q, lapack_int* ldq, float* z,\n                    lapack_int* ldz, lapack_int *info );\nvoid LAPACK_dgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* q, lapack_int* ldq, double* z,\n                    lapack_int* ldz, lapack_int *info );\nvoid LAPACK_cgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_int *info );\nvoid LAPACK_zgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_int *info );\nvoid LAPACK_sggbal( char* job, lapack_int* n, float* a, lapack_int* lda,\n                    float* b, lapack_int* ldb, lapack_int* ilo, lapack_int* ihi,\n                    float* lscale, float* rscale, float* work,\n                    lapack_int *info );\nvoid LAPACK_dggbal( char* job, lapack_int* n, double* a, lapack_int* lda,\n                    double* b, lapack_int* ldb, lapack_int* ilo,\n                    lapack_int* ihi, double* lscale, double* rscale,\n                    double* work, lapack_int *info );\nvoid LAPACK_cggbal( char* job, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int* ilo, lapack_int* ihi, 
float* lscale,\n                    float* rscale, float* work, lapack_int *info );\nvoid LAPACK_zggbal( char* job, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int* ilo, lapack_int* ihi, double* lscale,\n                    double* rscale, double* work, lapack_int *info );\nvoid LAPACK_sggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* lscale, const float* rscale,\n                    lapack_int* m, float* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_dggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* lscale, const double* rscale,\n                    lapack_int* m, double* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_cggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* lscale, const float* rscale,\n                    lapack_int* m, lapack_complex_float* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_zggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* lscale, const double* rscale,\n                    lapack_int* m, lapack_complex_double* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_shgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, float* h, lapack_int* ldh,\n                    float* t, lapack_int* ldt, float* alphar, float* alphai,\n                    float* beta, float* q, lapack_int* ldq, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dhgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, 
double* h,\n                    lapack_int* ldh, double* t, lapack_int* ldt, double* alphar,\n                    double* alphai, double* beta, double* q, lapack_int* ldq,\n                    double* z, lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_chgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, lapack_complex_float* h,\n                    lapack_int* ldh, lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, lapack_complex_double* h,\n                    lapack_int* ldh, lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_stgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const float* s, lapack_int* lds,\n                    const float* p, lapack_int* ldp, float* vl,\n                    lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, float* work,\n                    lapack_int *info );\nvoid LAPACK_dtgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const double* s, lapack_int* lds,\n                    const double* p, 
lapack_int* ldp, double* vl,\n                    lapack_int* ldvl, double* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, double* work,\n                    lapack_int *info );\nvoid LAPACK_ctgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_float* s,\n                    lapack_int* lds, const lapack_complex_float* p,\n                    lapack_int* ldp, lapack_complex_float* vl, lapack_int* ldvl,\n                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_double* s,\n                    lapack_int* lds, const lapack_complex_double* p,\n                    lapack_int* ldp, lapack_complex_double* vl,\n                    lapack_int* ldvl, lapack_complex_double* vr,\n                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* q, lapack_int* ldq, float* z, lapack_int* ldz,\n                    lapack_int* ifst, lapack_int* ilst, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dtgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* q, lapack_int* ldq, double* z, lapack_int* ldz,\n                    lapack_int* ifst, lapack_int* ilst, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_ctgexc( lapack_logical* wantq, 
lapack_logical* wantz, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz, lapack_int* ifst,\n                    lapack_int* ilst, lapack_int *info );\nvoid LAPACK_ztgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz, lapack_int* ifst,\n                    lapack_int* ilst, lapack_int *info );\nvoid LAPACK_stgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* alphar, float* alphai, float* beta,\n                    float* q, lapack_int* ldq, float* z, lapack_int* ldz,\n                    lapack_int* m, float* pl, float* pr, float* dif,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dtgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* alphar, double* alphai,\n                    double* beta, double* q, lapack_int* ldq, double* z,\n                    lapack_int* ldz, lapack_int* m, double* pl, double* pr,\n                    double* dif, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ctgsen( lapack_int* ijob, lapack_logical* wantq,\n        
            lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz, lapack_int* m,\n                    float* pl, float* pr, float* dif,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ztgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz, lapack_int* m,\n                    double* pl, double* pr, double* dif,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_stgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const float* a, lapack_int* lda, const float* b,\n                    lapack_int* ldb, float* c, lapack_int* ldc, const float* d,\n                    lapack_int* ldd, const float* e, lapack_int* lde, float* f,\n                    lapack_int* ldf, float* scale, float* dif, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const double* a, lapack_int* lda, const double* b,\n                    lapack_int* ldb, 
double* c, lapack_int* ldc,\n                    const double* d, lapack_int* ldd, const double* e,\n                    lapack_int* lde, double* f, lapack_int* ldf, double* scale,\n                    double* dif, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    const lapack_complex_float* d, lapack_int* ldd,\n                    const lapack_complex_float* e, lapack_int* lde,\n                    lapack_complex_float* f, lapack_int* ldf, float* scale,\n                    float* dif, lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ztgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    const lapack_complex_double* d, lapack_int* ldd,\n                    const lapack_complex_double* e, lapack_int* lde,\n                    lapack_complex_double* f, lapack_int* ldf, double* scale,\n                    double* dif, lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_stgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const float* a, lapack_int* lda,\n                    const float* b, lapack_int* ldb, const float* vl,\n                    lapack_int* ldvl, const float* vr, lapack_int* ldvr,\n                    float* s, float* dif, lapack_int* mm, lapack_int* m,\n                    float* work, 
lapack_int* lwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dtgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const double* a, lapack_int* lda,\n                    const double* b, lapack_int* ldb, const double* vl,\n                    lapack_int* ldvl, const double* vr, lapack_int* ldvr,\n                    double* s, double* dif, lapack_int* mm, lapack_int* m,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* b,\n                    lapack_int* ldb, const lapack_complex_float* vl,\n                    lapack_int* ldvl, const lapack_complex_float* vr,\n                    lapack_int* ldvr, float* s, float* dif, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ztgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* b,\n                    lapack_int* ldb, const lapack_complex_double* vl,\n                    lapack_int* ldvl, const lapack_complex_double* vr,\n                    lapack_int* ldvr, double* s, double* dif, lapack_int* mm,\n                    lapack_int* m, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, float* a, lapack_int* lda,\n                    float* b, lapack_int* ldb, float* tola, float* tolb,\n                    lapack_int* k, lapack_int* l, float* 
u, lapack_int* ldu,\n                    float* v, lapack_int* ldv, float* q, lapack_int* ldq,\n                    lapack_int* iwork, float* tau, float* work,\n                    lapack_int *info );\nvoid LAPACK_dggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, double* a, lapack_int* lda,\n                    double* b, lapack_int* ldb, double* tola, double* tolb,\n                    lapack_int* k, lapack_int* l, double* u, lapack_int* ldu,\n                    double* v, lapack_int* ldv, double* q, lapack_int* ldq,\n                    lapack_int* iwork, double* tau, double* work,\n                    lapack_int *info );\nvoid LAPACK_cggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                    float* tola, float* tolb, lapack_int* k, lapack_int* l,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_complex_float* q, lapack_int* ldq, lapack_int* iwork,\n                    float* rwork, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,\n                    double* tola, double* tolb, lapack_int* k, lapack_int* l,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_int* iwork, double* rwork,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    
lapack_int *info );\nvoid LAPACK_stgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* tola, float* tolb, float* alpha, float* beta,\n                    float* u, lapack_int* ldu, float* v, lapack_int* ldv,\n                    float* q, lapack_int* ldq, float* work, lapack_int* ncycle,\n                    lapack_int *info );\nvoid LAPACK_dtgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* tola, double* tolb, double* alpha, double* beta,\n                    double* u, lapack_int* ldu, double* v, lapack_int* ldv,\n                    double* q, lapack_int* ldq, double* work,\n                    lapack_int* ncycle, lapack_int *info );\nvoid LAPACK_ctgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* tola,\n                    float* tolb, float* alpha, float* beta,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* work, lapack_int* ncycle,\n                    lapack_int *info );\nvoid LAPACK_ztgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* tola,\n                    
double* tolb, double* alpha, double* beta,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* work, lapack_int* ncycle,\n                    lapack_int *info );\nvoid LAPACK_sgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                   float* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                   double* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_zgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_sgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb,\n                    lapack_int* jpvt, float* rcond, lapack_int* rank,\n                    float* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb,\n                    lapack_int* jpvt, double* rcond, lapack_int* rank,\n                    double* work, lapack_int* lwork, 
lapack_int *info );\nvoid LAPACK_cgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, lapack_int* jpvt,\n                    float* rcond, lapack_int* rank, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int *info );\nvoid LAPACK_zgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, lapack_int* jpvt,\n                    double* rcond, lapack_int* rank,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int *info );\nvoid LAPACK_zgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank,\n                    
lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_zgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgglse( lapack_int* m, lapack_int* n, lapack_int* p, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* c,\n                    float* d, float* x, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgglse( lapack_int* m, lapack_int* n, lapack_int* p, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* c,\n                    double* d, double* x, double* work, 
lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgglse( lapack_int* m, lapack_int* n, lapack_int* p,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* c, lapack_complex_float* d,\n                    lapack_complex_float* x, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zgglse( lapack_int* m, lapack_int* n, lapack_int* p,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* c, lapack_complex_double* d,\n                    lapack_complex_double* x, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sggglm( lapack_int* n, lapack_int* m, lapack_int* p, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* d,\n                    float* x, float* y, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dggglm( lapack_int* n, lapack_int* m, lapack_int* p, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* d,\n                    double* x, double* y, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cggglm( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* d, lapack_complex_float* x,\n                    lapack_complex_float* y, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zggglm( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n            
        lapack_complex_double* d, lapack_complex_double* x,\n                    lapack_complex_double* y, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_ssyev( char* jobz, char* uplo, lapack_int* n, float* a,\n                   lapack_int* lda, float* w, float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_dsyev( char* jobz, char* uplo, lapack_int* n, double* a,\n                   lapack_int* lda, double* w, double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_cheev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda, float* w,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zheev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda, double* w,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_int *info );\nvoid LAPACK_ssyevd( char* jobz, char* uplo, lapack_int* n, float* a,\n                    lapack_int* lda, float* w, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsyevd( char* jobz, char* uplo, lapack_int* n, double* a,\n                    lapack_int* lda, double* w, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cheevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* w,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zheevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, 
lapack_int* lda, double* w,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssyevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    float* a, lapack_int* lda, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsyevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    double* a, lapack_int* lda, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, double* z, lapack_int* ldz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_cheevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zheevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, 
lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_ssyevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    float* a, lapack_int* lda, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    lapack_int* isuppz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsyevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    double* a, lapack_int* lda, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, double* z, lapack_int* ldz,\n                    lapack_int* isuppz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cheevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_int* isuppz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zheevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_int* isuppz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* 
lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sspev( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,\n                   float* z, lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dspev( char* jobz, char* uplo, lapack_int* n, double* ap, double* w,\n                   double* z, lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_chpev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* ap, float* w, lapack_complex_float* z,\n                   lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zhpev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* ap, double* w,\n                   lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_sspevd( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dspevd( char* jobz, char* uplo, lapack_int* n, double* ap,\n                    double* w, double* z, lapack_int* ldz, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_chpevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* lrwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_zhpevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, double* w,\n                    lapack_complex_double* z, 
lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sspevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    float* ap, float* vl, float* vu, lapack_int* il,\n                    lapack_int* iu, float* abstol, lapack_int* m, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dspevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    double* ap, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chpevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhpevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_ssbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   float* ab, lapack_int* ldab, float* w, float* z,\n                  
 lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dsbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   double* ab, lapack_int* ldab, double* w, double* z,\n                   lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_chbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   lapack_complex_float* ab, lapack_int* ldab, float* w,\n                   lapack_complex_float* z, lapack_int* ldz,\n                   lapack_complex_float* work, float* rwork, lapack_int *info );\nvoid LAPACK_zhbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   lapack_complex_double* ab, lapack_int* ldab, double* w,\n                   lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_ssbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    float* ab, lapack_int* ldab, float* w, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    double* ab, lapack_int* ldab, double* w, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_float* ab, lapack_int* ldab, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    
lapack_complex_double* ab, lapack_int* ldab, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, float* ab, lapack_int* ldab, float* q,\n                    lapack_int* ldq, float* vl, float* vu, lapack_int* il,\n                    lapack_int* iu, float* abstol, lapack_int* m, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, double* ab, lapack_int* ldab, double* q,\n                    lapack_int* ldq, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* q, lapack_int* ldq, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* q, lapack_int* ldq, double* vl,\n              
      double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sstev( char* jobz, lapack_int* n, float* d, float* e, float* z,\n                   lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dstev( char* jobz, lapack_int* n, double* d, double* e, double* z,\n                   lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_sstevd( char* jobz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dstevd( char* jobz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sstevx( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dstevx( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sstevr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, 
float* z,\n                    lapack_int* ldz, lapack_int* isuppz, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_dstevr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, lapack_int* isuppz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sgees( char* jobvs, char* sort, LAPACK_S_SELECT2 select,\n                   lapack_int* n, float* a, lapack_int* lda, lapack_int* sdim,\n                   float* wr, float* wi, float* vs, lapack_int* ldvs,\n                   float* work, lapack_int* lwork, lapack_logical* bwork,\n                   lapack_int *info );\nvoid LAPACK_dgees( char* jobvs, char* sort, LAPACK_D_SELECT2 select,\n                   lapack_int* n, double* a, lapack_int* lda, lapack_int* sdim,\n                   double* wr, double* wi, double* vs, lapack_int* ldvs,\n                   double* work, lapack_int* lwork, lapack_logical* bwork,\n                   lapack_int *info );\nvoid LAPACK_cgees( char* jobvs, char* sort, LAPACK_C_SELECT1 select,\n                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                   lapack_int* sdim, lapack_complex_float* w,\n                   lapack_complex_float* vs, lapack_int* ldvs,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_zgees( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,\n                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                   lapack_int* sdim, lapack_complex_double* w,\n                   lapack_complex_double* vs, lapack_int* 
ldvs,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_sgeesx( char* jobvs, char* sort, LAPACK_S_SELECT2 select,\n                    char* sense, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* sdim, float* wr, float* wi, float* vs,\n                    lapack_int* ldvs, float* rconde, float* rcondv, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_dgeesx( char* jobvs, char* sort, LAPACK_D_SELECT2 select,\n                    char* sense, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* sdim, double* wr, double* wi, double* vs,\n                    lapack_int* ldvs, double* rconde, double* rcondv,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_cgeesx( char* jobvs, char* sort, LAPACK_C_SELECT1 select,\n                    char* sense, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* sdim, lapack_complex_float* w,\n                    lapack_complex_float* vs, lapack_int* ldvs, float* rconde,\n                    float* rcondv, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_zgeesx( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,\n                    char* sense, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* sdim, lapack_complex_double* w,\n                    lapack_complex_double* vs, lapack_int* ldvs, double* rconde,\n                    double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, 
lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_sgeev( char* jobvl, char* jobvr, lapack_int* n, float* a,\n                   lapack_int* lda, float* wr, float* wi, float* vl,\n                   lapack_int* ldvl, float* vr, lapack_int* ldvr, float* work,\n                   lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgeev( char* jobvl, char* jobvr, lapack_int* n, double* a,\n                   lapack_int* lda, double* wr, double* wi, double* vl,\n                   lapack_int* ldvl, double* vr, lapack_int* ldvr, double* work,\n                   lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgeev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* w, lapack_complex_float* vl,\n                   lapack_int* ldvl, lapack_complex_float* vr, lapack_int* ldvr,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zgeev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* w, lapack_complex_double* vl,\n                   lapack_int* ldvl, lapack_complex_double* vr,\n                   lapack_int* ldvr, lapack_complex_double* work,\n                   lapack_int* lwork, double* rwork, lapack_int *info );\nvoid LAPACK_sgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, float* a, lapack_int* lda, float* wr,\n                    float* wi, float* vl, lapack_int* ldvl, float* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    float* scale, float* abnrm, float* rconde, float* rcondv,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n          
          lapack_int* n, double* a, lapack_int* lda, double* wr,\n                    double* wi, double* vl, lapack_int* ldvl, double* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    double* scale, double* abnrm, double* rconde,\n                    double* rcondv, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* w, lapack_complex_float* vl,\n                    lapack_int* ldvl, lapack_complex_float* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    float* scale, float* abnrm, float* rconde, float* rcondv,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* w, lapack_complex_double* vl,\n                    lapack_int* ldvl, lapack_complex_double* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    double* scale, double* abnrm, double* rconde,\n                    double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int *info );\nvoid LAPACK_sgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    float* a, lapack_int* lda, float* s, float* u,\n                    lapack_int* ldu, float* vt, lapack_int* ldvt, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    double* a, lapack_int* lda, double* s, double* u,\n                    
lapack_int* ldu, double* vt, lapack_int* ldvt, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* s,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* vt, lapack_int* ldvt,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* s,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* vt, lapack_int* ldvt,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgesdd( char* jobz, lapack_int* m, lapack_int* n, float* a,\n                    lapack_int* lda, float* s, float* u, lapack_int* ldu,\n                    float* vt, lapack_int* ldvt, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgesdd( char* jobz, lapack_int* m, lapack_int* n, double* a,\n                    lapack_int* lda, double* s, double* u, lapack_int* ldu,\n                    double* vt, lapack_int* ldvt, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgesdd( char* jobz, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* s,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* vt, lapack_int* ldvt,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgesdd( char* jobz, lapack_int* m, lapack_int* n,\n                  
  lapack_complex_double* a, lapack_int* lda, double* s,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* vt, lapack_int* ldvt,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,\n                    char* jobp, lapack_int* m, lapack_int* n, double* a,\n                    lapack_int* lda, double* sva, double* u, lapack_int* ldu,\n                    double* v, lapack_int* ldv, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,\n                    char* jobp, lapack_int* m, lapack_int* n, float* a,\n                    lapack_int* lda, float* sva, float* u, lapack_int* ldu,\n                    float* v, lapack_int* ldv, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,\n                    lapack_int* n, double* a, lapack_int* lda, double* sva,\n                    lapack_int* mv, double* v, lapack_int* ldv, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,\n                    lapack_int* n, float* a, lapack_int* lda, float* sva,\n                    lapack_int* mv, float* v, lapack_int* ldv, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* alpha, float* beta, float* u, lapack_int* ldu,\n                    float* v, lapack_int* ldv, 
float* q, lapack_int* ldq,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* alpha, double* beta, double* u, lapack_int* ldu,\n                    double* v, lapack_int* ldv, double* q, lapack_int* ldq,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* alpha,\n                    float* beta, lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* work, float* rwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_zggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* alpha,\n                    double* beta, lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                   float* w, float* work, 
lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dsygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                   double* w, double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_chegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb, float* w,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zhegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb, double* w,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_int *info );\nvoid LAPACK_ssygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* w, float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* w, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* w,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid 
LAPACK_zhegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* w,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssygvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* vl, float* vu, lapack_int* il,\n                    lapack_int* iu, float* abstol, lapack_int* m, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsygvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chegvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhegvx( lapack_int* itype, char* jobz, char* range, char* 
uplo,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   float* ap, float* bp, float* w, float* z, lapack_int* ldz,\n                   float* work, lapack_int *info );\nvoid LAPACK_dspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   double* ap, double* bp, double* w, double* z,\n                   lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_chpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* ap, lapack_complex_float* bp, float* w,\n                   lapack_complex_float* z, lapack_int* ldz,\n                   lapack_complex_float* work, float* rwork, lapack_int *info );\nvoid LAPACK_zhpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* ap, lapack_complex_double* bp,\n                   double* w, lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_sspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    float* ap, float* bp, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    double* ap, double* bp, double* 
w, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, lapack_complex_float* bp,\n                    float* w, lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, lapack_complex_double* bp,\n                    double* w, lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, float* ap, float* bp, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* iwork, lapack_int* ifail,\n                    lapack_int *info );\nvoid LAPACK_dspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, double* ap, double* bp, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, double* z, lapack_int* ldz,\n                    double* work, lapack_int* iwork, lapack_int* ifail,\n                    lapack_int *info );\nvoid LAPACK_chpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_float* ap,\n   
                 lapack_complex_float* bp, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_double* ap,\n                    lapack_complex_double* bp, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_ssbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, float* ab, lapack_int* ldab, float* bb,\n                   lapack_int* ldbb, float* w, float* z, lapack_int* ldz,\n                   float* work, lapack_int *info );\nvoid LAPACK_dsbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, double* ab, lapack_int* ldab, double* bb,\n                   lapack_int* ldbb, double* w, double* z, lapack_int* ldz,\n                   double* work, lapack_int *info );\nvoid LAPACK_chbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,\n                   lapack_complex_float* bb, lapack_int* ldbb, float* w,\n                   lapack_complex_float* z, lapack_int* ldz,\n                   lapack_complex_float* work, float* rwork, lapack_int *info );\nvoid LAPACK_zhbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,\n                   
lapack_complex_double* bb, lapack_int* ldbb, double* w,\n                   lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_ssbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, float* ab, lapack_int* ldab, float* bb,\n                    lapack_int* ldbb, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, double* ab, lapack_int* ldab, double* bb,\n                    lapack_int* ldbb, double* w, double* z, lapack_int* ldz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* bb, lapack_int* ldbb, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* bb, lapack_int* ldbb, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssbgvx( char* jobz, char* range, 
char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, float* ab, lapack_int* ldab,\n                    float* bb, lapack_int* ldbb, float* q, lapack_int* ldq,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, double* ab,\n                    lapack_int* ldab, double* bb, lapack_int* ldbb, double* q,\n                    lapack_int* ldq, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, lapack_complex_float* ab,\n                    lapack_int* ldab, lapack_complex_float* bb,\n                    lapack_int* ldbb, lapack_complex_float* q, lapack_int* ldq,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, float* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, lapack_complex_double* ab,\n                    lapack_int* ldab, lapack_complex_double* bb,\n                    lapack_int* ldbb, lapack_complex_double* q, lapack_int* ldq,\n                    double* vl, double* vu, lapack_int* il, lapack_int* iu,\n   
                 double* abstol, lapack_int* m, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_S_SELECT3 selctg, lapack_int* n, float* a,\n                   lapack_int* lda, float* b, lapack_int* ldb, lapack_int* sdim,\n                   float* alphar, float* alphai, float* beta, float* vsl,\n                   lapack_int* ldvsl, float* vsr, lapack_int* ldvsr,\n                   float* work, lapack_int* lwork, lapack_logical* bwork,\n                   lapack_int *info );\nvoid LAPACK_dgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_D_SELECT3 selctg, lapack_int* n, double* a,\n                   lapack_int* lda, double* b, lapack_int* ldb,\n                   lapack_int* sdim, double* alphar, double* alphai,\n                   double* beta, double* vsl, lapack_int* ldvsl, double* vsr,\n                   lapack_int* ldvsr, double* work, lapack_int* lwork,\n                   lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_cgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_C_SELECT2 selctg, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,\n                   lapack_complex_float* alpha, lapack_complex_float* beta,\n                   lapack_complex_float* vsl, lapack_int* ldvsl,\n                   lapack_complex_float* vsr, lapack_int* ldvsr,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_zgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_Z_SELECT2 selctg, lapack_int* n,\n                   lapack_complex_double* a, 
lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,\n                   lapack_complex_double* alpha, lapack_complex_double* beta,\n                   lapack_complex_double* vsl, lapack_int* ldvsl,\n                   lapack_complex_double* vsr, lapack_int* ldvsr,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_sggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_S_SELECT3 selctg, char* sense, lapack_int* n,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    lapack_int* sdim, float* alphar, float* alphai, float* beta,\n                    float* vsl, lapack_int* ldvsl, float* vsr,\n                    lapack_int* ldvsr, float* rconde, float* rcondv,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_dggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_D_SELECT3 selctg, char* sense, lapack_int* n,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    lapack_int* sdim, double* alphar, double* alphai,\n                    double* beta, double* vsl, lapack_int* ldvsl, double* vsr,\n                    lapack_int* ldvsr, double* rconde, double* rcondv,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_cggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_C_SELECT2 selctg, char* sense, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,\n                    lapack_complex_float* 
alpha, lapack_complex_float* beta,\n                    lapack_complex_float* vsl, lapack_int* ldvsl,\n                    lapack_complex_float* vsr, lapack_int* ldvsr, float* rconde,\n                    float* rcondv, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_zggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_Z_SELECT2 selctg, char* sense, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* vsl, lapack_int* ldvsl,\n                    lapack_complex_double* vsr, lapack_int* ldvsr,\n                    double* rconde, double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_sggev( char* jobvl, char* jobvr, lapack_int* n, float* a,\n                   lapack_int* lda, float* b, lapack_int* ldb, float* alphar,\n                   float* alphai, float* beta, float* vl, lapack_int* ldvl,\n                   float* vr, lapack_int* ldvr, float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_dggev( char* jobvl, char* jobvr, lapack_int* n, double* a,\n                   lapack_int* lda, double* b, lapack_int* ldb, double* alphar,\n                   double* alphai, double* beta, double* vl, lapack_int* ldvl,\n                   double* vr, lapack_int* ldvr, double* work,\n                   lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cggev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_float* a, 
lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* alpha, lapack_complex_float* beta,\n                   lapack_complex_float* vl, lapack_int* ldvl,\n                   lapack_complex_float* vr, lapack_int* ldvr,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zggev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* alpha, lapack_complex_double* beta,\n                   lapack_complex_double* vl, lapack_int* ldvl,\n                   lapack_complex_double* vr, lapack_int* ldvr,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_int *info );\nvoid LAPACK_sggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* alphar, float* alphai, float* beta,\n                    float* vl, lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* ilo, lapack_int* ihi, float* lscale,\n                    float* rscale, float* abnrm, float* bbnrm, float* rconde,\n                    float* rcondv, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_dggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* alphar, double* alphai,\n                    double* beta, double* vl, lapack_int* ldvl, double* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    double* lscale, double* rscale, double* abnrm,\n          
          double* bbnrm, double* rconde, double* rcondv, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_cggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* vl, lapack_int* ldvl,\n                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* ilo,\n                    lapack_int* ihi, float* lscale, float* rscale, float* abnrm,\n                    float* bbnrm, float* rconde, float* rcondv,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* iwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_zggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* vl, lapack_int* ldvl,\n                    lapack_complex_double* vr, lapack_int* ldvr,\n                    lapack_int* ilo, lapack_int* ihi, double* lscale,\n                    double* rscale, double* abnrm, double* bbnrm,\n                    double* rconde, double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_dsfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, double* alpha, const double* a,\n                   lapack_int* lda, double* beta, double* c );\nvoid LAPACK_ssfrk( char* transr, char* 
uplo, char* trans, lapack_int* n,\n                   lapack_int* k, float* alpha, const float* a, lapack_int* lda,\n                   float* beta, float* c );\nvoid LAPACK_zhfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, double* alpha, const lapack_complex_double* a,\n                   lapack_int* lda, double* beta, lapack_complex_double* c );\nvoid LAPACK_chfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, float* alpha, const lapack_complex_float* a,\n                   lapack_int* lda, float* beta, lapack_complex_float* c );\nvoid LAPACK_dtfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n, double* alpha,\n                   const double* a, double* b, lapack_int* ldb );\nvoid LAPACK_stfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n, float* alpha,\n                   const float* a, float* b, lapack_int* ldb );\nvoid LAPACK_ztfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n,\n                   lapack_complex_double* alpha, const lapack_complex_double* a,\n                   lapack_complex_double* b, lapack_int* ldb );\nvoid LAPACK_ctfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n,\n                   lapack_complex_float* alpha, const lapack_complex_float* a,\n                   lapack_complex_float* b, lapack_int* ldb );\nvoid LAPACK_dtfttp( char* transr, char* uplo, lapack_int* n, const double* arf,\n                    double* ap, lapack_int *info );\nvoid LAPACK_stfttp( char* transr, char* uplo, lapack_int* n, const float* arf,\n                    float* ap, lapack_int *info );\nvoid LAPACK_ztfttp( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* arf, 
lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_ctfttp( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* arf, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_dtfttr( char* transr, char* uplo, lapack_int* n, const double* arf,\n                    double* a, lapack_int* lda, lapack_int *info );\nvoid LAPACK_stfttr( char* transr, char* uplo, lapack_int* n, const float* arf,\n                    float* a, lapack_int* lda, lapack_int *info );\nvoid LAPACK_ztfttr( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* arf, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_ctfttr( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* arf, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dtpttf( char* transr, char* uplo, lapack_int* n, const double* ap,\n                    double* arf, lapack_int *info );\nvoid LAPACK_stpttf( char* transr, char* uplo, lapack_int* n, const float* ap,\n                    float* arf, lapack_int *info );\nvoid LAPACK_ztpttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* ap, lapack_complex_double* arf,\n                    lapack_int *info );\nvoid LAPACK_ctpttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* ap, lapack_complex_float* arf,\n                    lapack_int *info );\nvoid LAPACK_dtpttr( char* uplo, lapack_int* n, const double* ap, double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_stpttr( char* uplo, lapack_int* n, const float* ap, float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_ztpttr( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    lapack_complex_double* a, lapack_int* lda,\n   
                 lapack_int *info );\nvoid LAPACK_ctpttr( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dtrttf( char* transr, char* uplo, lapack_int* n, const double* a,\n                    lapack_int* lda, double* arf, lapack_int *info );\nvoid LAPACK_strttf( char* transr, char* uplo, lapack_int* n, const float* a,\n                    lapack_int* lda, float* arf, lapack_int *info );\nvoid LAPACK_ztrttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* arf, lapack_int *info );\nvoid LAPACK_ctrttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* arf, lapack_int *info );\nvoid LAPACK_dtrttp( char* uplo, lapack_int* n, const double* a, lapack_int* lda,\n                    double* ap, lapack_int *info );\nvoid LAPACK_strttp( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                    float* ap, lapack_int *info );\nvoid LAPACK_ztrttp( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_ctrttp( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_sgeqrfp( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* tau, float* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_dgeqrfp( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                     double* tau, double* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_cgeqrfp( lapack_int* m, lapack_int* n, 
lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* tau,\n                     lapack_complex_float* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_zgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* tau,\n                     lapack_complex_double* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_clacgv( lapack_int* n, lapack_complex_float* x, lapack_int* incx );\nvoid LAPACK_zlacgv( lapack_int* n, lapack_complex_double* x, lapack_int* incx );\nvoid LAPACK_slarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    float* x );\nvoid LAPACK_dlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    double* x );\nvoid LAPACK_clarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    lapack_complex_float* x );\nvoid LAPACK_zlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    lapack_complex_double* x );\nvoid LAPACK_sgeqr2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int *info );\nvoid LAPACK_dgeqr2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int *info );\nvoid LAPACK_cgeqr2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgeqr2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slacn2( lapack_int* n, float* v, float* x, lapack_int* isgn,\n                    float* est, lapack_int* kase, lapack_int* isave );\nvoid LAPACK_dlacn2( lapack_int* n, double* v, double* x, 
lapack_int* isgn,\n                    double* est, lapack_int* kase, lapack_int* isave );\nvoid LAPACK_clacn2( lapack_int* n, lapack_complex_float* v,\n                    lapack_complex_float* x, float* est,\n                    lapack_int* kase, lapack_int* isave );\nvoid LAPACK_zlacn2( lapack_int* n, lapack_complex_double* v,\n                    lapack_complex_double* x, double* est,\n                    lapack_int* kase, lapack_int* isave );\nvoid LAPACK_slacpy( char* uplo, lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb );\nvoid LAPACK_dlacpy( char* uplo, lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb );\nvoid LAPACK_clacpy( char* uplo, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb );\nvoid LAPACK_zlacpy( char* uplo, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb );\n\nvoid LAPACK_clacp2( char* uplo, lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb );\nvoid LAPACK_zlacp2( char* uplo, lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, lapack_complex_double* b,\n                    lapack_int* ldb );\n\nvoid LAPACK_sgetf2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgetf2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgetf2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zgetf2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n    
                lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_slaswp( lapack_int* n, float* a, lapack_int* lda, lapack_int* k1,\n                    lapack_int* k2, const lapack_int* ipiv, lapack_int* incx );\nvoid LAPACK_dlaswp( lapack_int* n, double* a, lapack_int* lda, lapack_int* k1,\n                    lapack_int* k2, const lapack_int* ipiv, lapack_int* incx );\nvoid LAPACK_claswp( lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_int* k1, lapack_int* k2, const lapack_int* ipiv,\n                    lapack_int* incx );\nvoid LAPACK_zlaswp( lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_int* k1, lapack_int* k2, const lapack_int* ipiv,\n                    lapack_int* incx );\nfloat LAPACK_slange( char* norm, lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, float* work );\ndouble LAPACK_dlange( char* norm, lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, double* work );\nfloat LAPACK_clange( char* norm, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda, float* work );\ndouble LAPACK_zlange( char* norm, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* work );\nfloat LAPACK_clanhe( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda, float* work );\ndouble LAPACK_zlanhe( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* work );\nfloat LAPACK_slansy( char* norm, char* uplo, lapack_int* n, const float* a,\n                    lapack_int* lda, float* work );\ndouble LAPACK_dlansy( char* norm, char* uplo, lapack_int* n, const double* a,\n                    lapack_int* lda, double* work );\nfloat LAPACK_clansy( char* norm, char* uplo, lapack_int* n,\n                    const 
lapack_complex_float* a, lapack_int* lda, float* work );\ndouble LAPACK_zlansy( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* work );\nfloat LAPACK_slantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const float* a, lapack_int* lda, float* work );\ndouble LAPACK_dlantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const double* a, lapack_int* lda, double* work );\nfloat LAPACK_clantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* a, lapack_int* lda,\n                    float* work );\ndouble LAPACK_zlantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* a, lapack_int* lda,\n                    double* work );\nfloat LAPACK_slamch( char* cmach );\ndouble LAPACK_dlamch( char* cmach );\nvoid LAPACK_sgelq2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int *info );\nvoid LAPACK_dgelq2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int *info );\nvoid LAPACK_cgelq2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgelq2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, const float* v,\n                    lapack_int* ldv, const float* t, lapack_int* ldt, float* c,\n                    lapack_int* ldc, float* work, lapack_int* 
ldwork );\nvoid LAPACK_dlarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k,\n                    const double* v, lapack_int* ldv, const double* t,\n                    lapack_int* ldt, double* c, lapack_int* ldc, double* work,\n                    lapack_int* ldwork );\nvoid LAPACK_clarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k,\n                    const lapack_complex_float* v, lapack_int* ldv,\n                    const lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* ldwork );\nvoid LAPACK_zlarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k,\n                    const lapack_complex_double* v, lapack_int* ldv,\n                    const lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* ldwork );\nvoid LAPACK_slarfg( lapack_int* n, float* alpha, float* x, lapack_int* incx,\n                    float* tau );\nvoid LAPACK_dlarfg( lapack_int* n, double* alpha, double* x, lapack_int* incx,\n                    double* tau );\nvoid LAPACK_clarfg( lapack_int* n, lapack_complex_float* alpha,\n                    lapack_complex_float* x, lapack_int* incx,\n                    lapack_complex_float* tau );\nvoid LAPACK_zlarfg( lapack_int* n, lapack_complex_double* alpha,\n                    lapack_complex_double* x, lapack_int* incx,\n                    lapack_complex_double* tau );\nvoid LAPACK_slarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const float* v, lapack_int* ldv, const float* tau, float* t,\n                    lapack_int* ldt );\nvoid LAPACK_dlarft( char* direct, 
char* storev, lapack_int* n, lapack_int* k,\n                    const double* v, lapack_int* ldv, const double* tau,\n                    double* t, lapack_int* ldt );\nvoid LAPACK_clarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const lapack_complex_float* v, lapack_int* ldv,\n                    const lapack_complex_float* tau, lapack_complex_float* t,\n                    lapack_int* ldt );\nvoid LAPACK_zlarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const lapack_complex_double* v, lapack_int* ldv,\n                    const lapack_complex_double* tau, lapack_complex_double* t,\n                    lapack_int* ldt );\nvoid LAPACK_slarfx( char* side, lapack_int* m, lapack_int* n, const float* v,\n                    float* tau, float* c, lapack_int* ldc, float* work );\nvoid LAPACK_dlarfx( char* side, lapack_int* m, lapack_int* n, const double* v,\n                    double* tau, double* c, lapack_int* ldc, double* work );\nvoid LAPACK_clarfx( char* side, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* v, lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work );\nvoid LAPACK_zlarfx( char* side, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* v, lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work );\nvoid LAPACK_slatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, float* d, lapack_int* mode, float* cond,\n                    float* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    float* a, lapack_int* lda, float* work, lapack_int *info );\nvoid LAPACK_dlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, double* d, lapack_int* mode, double* 
cond,\n                    double* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    double* a, lapack_int* lda, double* work,\n                    lapack_int *info );\nvoid LAPACK_clatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, float* d, lapack_int* mode, float* cond,\n                    float* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, double* d, lapack_int* mode, double* cond,\n                    double* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slag2d( lapack_int* m, lapack_int* n, const float* sa,\n                    lapack_int* ldsa, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dlag2s( lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, float* sa, lapack_int* ldsa,\n                    lapack_int *info );\nvoid LAPACK_clag2z( lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* sa, lapack_int* ldsa,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_zlag2c( lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_float* sa, lapack_int* ldsa,\n                    lapack_int *info );\nvoid LAPACK_slauum( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dlauum( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_clauum( char* uplo, 
lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_zlauum( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_slagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const float* d, float* a, lapack_int* lda,\n                    lapack_int* iseed, float* work, lapack_int *info );\nvoid LAPACK_dlagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const double* d, double* a, lapack_int* lda,\n                    lapack_int* iseed, double* work, lapack_int *info );\nvoid LAPACK_clagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const float* d, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const double* d, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slaset( char* uplo, lapack_int* m, lapack_int* n, float* alpha,\n                    float* beta, float* a, lapack_int* lda );\nvoid LAPACK_dlaset( char* uplo, lapack_int* m, lapack_int* n, double* alpha,\n                    double* beta, double* a, lapack_int* lda );\nvoid LAPACK_claset( char* uplo, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* a, lapack_int* lda );\nvoid LAPACK_zlaset( char* uplo, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* a, lapack_int* lda );\nvoid LAPACK_slasrt( char* id, lapack_int* n, float* d, lapack_int *info );\nvoid 
LAPACK_dlasrt( char* id, lapack_int* n, double* d, lapack_int *info );\nvoid LAPACK_claghe( lapack_int* n, lapack_int* k, const float* d,\n                    lapack_complex_float* a, lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlaghe( lapack_int* n, lapack_int* k, const double* d,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int* iseed, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_slagsy( lapack_int* n, lapack_int* k, const float* d, float* a,\n                    lapack_int* lda, lapack_int* iseed, float* work,\n                    lapack_int *info );\nvoid LAPACK_dlagsy( lapack_int* n, lapack_int* k, const double* d, double* a,\n                    lapack_int* lda, lapack_int* iseed, double* work,\n                    lapack_int *info );\nvoid LAPACK_clagsy( lapack_int* n, lapack_int* k, const float* d,\n                    lapack_complex_float* a, lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlagsy( lapack_int* n, lapack_int* k, const double* d,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int* iseed, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_slapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    float* x, lapack_int* ldx, lapack_int* k );\nvoid LAPACK_dlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    double* x, lapack_int* ldx, lapack_int* k );\nvoid LAPACK_clapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* x, lapack_int* ldx, lapack_int* k );\nvoid LAPACK_zlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* x, lapack_int* ldx, lapack_int* k );\nfloat LAPACK_slapy2( float* x, 
float* y );\ndouble LAPACK_dlapy2( double* x, double* y );\nfloat LAPACK_slapy3( float* x, float* y, float* z );\ndouble LAPACK_dlapy3( double* x, double* y, double* z );\nvoid LAPACK_slartgp( float* f, float* g, float* cs, float* sn, float* r );\nvoid LAPACK_dlartgp( double* f, double* g, double* cs, double* sn, double* r );\nvoid LAPACK_slartgs( float* x, float* y, float* sigma, float* cs, float* sn );\nvoid LAPACK_dlartgs( double* x, double* y, double* sigma, double* cs,\n                     double* sn );\n// LAPACK 3.3.0\nvoid LAPACK_cbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    float* theta, float* phi,\n                    lapack_complex_float* u1, lapack_int* ldu1,\n                    lapack_complex_float* u2, lapack_int* ldu2,\n                    lapack_complex_float* v1t, lapack_int* ldv1t,\n                    lapack_complex_float* v2t, lapack_int* ldv2t,\n                    float* b11d, float* b11e, float* b12d,\n                    float* b12e, float* b21d, float* b21e,\n                    float* b22d, float* b22e, float* rwork,\n                    lapack_int* lrwork , lapack_int *info );\nvoid LAPACK_cheswapr( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_chetri2( char* uplo, lapack_int* n,\n                     lapack_complex_float* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_chetri2x( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_float* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_chetrs2( char* uplo, lapack_int* n,\n                     lapack_int* 
nrhs, const lapack_complex_float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* work , lapack_int *info );\nvoid LAPACK_csyconv( char* uplo, char* way,\n                     lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_float* work , lapack_int *info );\nvoid LAPACK_csyswapr( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_csytri2( char* uplo, lapack_int* n,\n                     lapack_complex_float* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_csytri2x( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_float* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_csytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const lapack_complex_float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* work , lapack_int *info );\nvoid LAPACK_cunbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    lapack_complex_float* x11, lapack_int* ldx11,\n                    lapack_complex_float* x12, lapack_int* ldx12,\n                    lapack_complex_float* x21, lapack_int* ldx21,\n                    lapack_complex_float* x22, lapack_int* ldx22,\n                    float* theta, float* phi,\n                    lapack_complex_float* taup1,\n                    lapack_complex_float* taup2,\n                    
lapack_complex_float* tauq1,\n                    lapack_complex_float* tauq2,\n                    lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_cuncsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, lapack_complex_float* x11,\n                    lapack_int* ldx11, lapack_complex_float* x12,\n                    lapack_int* ldx12, lapack_complex_float* x21,\n                    lapack_int* ldx21, lapack_complex_float* x22,\n                    lapack_int* ldx22, float* theta,\n                    lapack_complex_float* u1, lapack_int* ldu1,\n                    lapack_complex_float* u2, lapack_int* ldu2,\n                    lapack_complex_float* v1t, lapack_int* ldv1t,\n                    lapack_complex_float* v2t, lapack_int* ldv2t,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    float* rwork, lapack_int* lrwork,\n                    lapack_int* iwork , lapack_int *info );\nvoid LAPACK_dbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    double* theta, double* phi, double* u1,\n                    lapack_int* ldu1, double* u2, lapack_int* ldu2,\n                    double* v1t, lapack_int* ldv1t, double* v2t,\n                    lapack_int* ldv2t, double* b11d, double* b11e,\n                    double* b12d, double* b12e, double* b21d,\n                    double* b21e, double* b22d, double* b22e,\n                    double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_dorbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    double* x11, lapack_int* ldx11, double* x12,\n                    lapack_int* ldx12, double* x21, lapack_int* ldx21,\n 
                   double* x22, lapack_int* ldx22, double* theta,\n                    double* phi, double* taup1, double* taup2,\n                    double* tauq1, double* tauq2, double* work,\n                    lapack_int* lwork , lapack_int *info );\nvoid LAPACK_dorcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, double* x11, lapack_int* ldx11,\n                    double* x12, lapack_int* ldx12, double* x21,\n                    lapack_int* ldx21, double* x22, lapack_int* ldx22,\n                    double* theta, double* u1, lapack_int* ldu1,\n                    double* u2, lapack_int* ldu2, double* v1t,\n                    lapack_int* ldv1t, double* v2t, lapack_int* ldv2t,\n                    double* work, lapack_int* lwork,\n                    lapack_int* iwork , lapack_int *info );\nvoid LAPACK_dsyconv( char* uplo, char* way,\n                     lapack_int* n, double* a, lapack_int* lda,\n                     const lapack_int* ipiv, double* work , lapack_int *info );\nvoid LAPACK_dsyswapr( char* uplo, lapack_int* n,\n                      double* a, lapack_int* i1, lapack_int* i2 );\nvoid LAPACK_dsytri2( char* uplo, lapack_int* n,\n                     double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_dsytri2x( char* uplo, lapack_int* n,\n                      double* a, lapack_int* lda,\n                      const lapack_int* ipiv, double* work,\n                      lapack_int* nb , lapack_int *info );\nvoid LAPACK_dsytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const double* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     double* b, lapack_int* ldb, double* work , lapack_int *info );\nvoid LAPACK_sbbcsd( char* 
jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    float* theta, float* phi, float* u1,\n                    lapack_int* ldu1, float* u2, lapack_int* ldu2,\n                    float* v1t, lapack_int* ldv1t, float* v2t,\n                    lapack_int* ldv2t, float* b11d, float* b11e,\n                    float* b12d, float* b12e, float* b21d,\n                    float* b21e, float* b22d, float* b22e,\n                    float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_sorbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    float* x11, lapack_int* ldx11, float* x12,\n                    lapack_int* ldx12, float* x21, lapack_int* ldx21,\n                    float* x22, lapack_int* ldx22, float* theta,\n                    float* phi, float* taup1, float* taup2,\n                    float* tauq1, float* tauq2, float* work,\n                    lapack_int* lwork , lapack_int *info );\nvoid LAPACK_sorcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, float* x11, lapack_int* ldx11,\n                    float* x12, lapack_int* ldx12, float* x21,\n                    lapack_int* ldx21, float* x22, lapack_int* ldx22,\n                    float* theta, float* u1, lapack_int* ldu1,\n                    float* u2, lapack_int* ldu2, float* v1t,\n                    lapack_int* ldv1t, float* v2t, lapack_int* ldv2t,\n                    float* work, lapack_int* lwork,\n                    lapack_int* iwork , lapack_int *info );\nvoid LAPACK_ssyconv( char* uplo, char* way,\n                     lapack_int* n, float* a, lapack_int* lda,\n                     const lapack_int* ipiv, float* work , lapack_int *info );\nvoid LAPACK_ssyswapr( char* 
uplo, lapack_int* n,\n                      float* a, lapack_int* i1, lapack_int* i2 );\nvoid LAPACK_ssytri2( char* uplo, lapack_int* n,\n                     float* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_ssytri2x( char* uplo, lapack_int* n,\n                      float* a, lapack_int* lda,\n                      const lapack_int* ipiv, float* work,\n                      lapack_int* nb , lapack_int *info );\nvoid LAPACK_ssytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     float* b, lapack_int* ldb, float* work , lapack_int *info );\nvoid LAPACK_zbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    double* theta, double* phi,\n                    lapack_complex_double* u1, lapack_int* ldu1,\n                    lapack_complex_double* u2, lapack_int* ldu2,\n                    lapack_complex_double* v1t, lapack_int* ldv1t,\n                    lapack_complex_double* v2t, lapack_int* ldv2t,\n                    double* b11d, double* b11e, double* b12d,\n                    double* b12e, double* b21d, double* b21e,\n                    double* b22d, double* b22e, double* rwork,\n                    lapack_int* lrwork , lapack_int *info );\nvoid LAPACK_zheswapr( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_zhetri2( char* uplo, lapack_int* n,\n                     lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_zhetri2x( char* uplo, lapack_int* n,\n     
                 lapack_complex_double* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_double* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_zhetrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* work , lapack_int *info );\nvoid LAPACK_zsyconv( char* uplo, char* way,\n                     lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_double* work , lapack_int *info );\nvoid LAPACK_zsyswapr( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_zsytri2( char* uplo, lapack_int* n,\n                     lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_zsytri2x( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_double* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_zsytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* work , lapack_int *info );\nvoid LAPACK_zunbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    lapack_complex_double* x11, lapack_int* ldx11,\n                    
lapack_complex_double* x12, lapack_int* ldx12,\n                    lapack_complex_double* x21, lapack_int* ldx21,\n                    lapack_complex_double* x22, lapack_int* ldx22,\n                    double* theta, double* phi,\n                    lapack_complex_double* taup1,\n                    lapack_complex_double* taup2,\n                    lapack_complex_double* tauq1,\n                    lapack_complex_double* tauq2,\n                    lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_zuncsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, lapack_complex_double* x11,\n                    lapack_int* ldx11, lapack_complex_double* x12,\n                    lapack_int* ldx12, lapack_complex_double* x21,\n                    lapack_int* ldx21, lapack_complex_double* x22,\n                    lapack_int* ldx22, double* theta,\n                    lapack_complex_double* u1, lapack_int* ldu1,\n                    lapack_complex_double* u2, lapack_int* ldu2,\n                    lapack_complex_double* v1t, lapack_int* ldv1t,\n                    lapack_complex_double* v2t, lapack_int* ldv2t,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork,\n                    lapack_int* iwork , lapack_int *info );\n// LAPACK 3.4.0\nvoid LAPACK_sgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb, const float* v,\n                     lapack_int* ldv, const float* t, lapack_int* ldt, float* c,\n                     lapack_int* ldc, float* work, lapack_int *info );\nvoid LAPACK_dgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb, const double* v,\n                     lapack_int* ldv, const double* t, 
lapack_int* ldt,\n                     double* c, lapack_int* ldc, double* work,\n                     lapack_int *info );\nvoid LAPACK_cgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb,\n                     const lapack_complex_float* v, lapack_int* ldv,\n                     const lapack_complex_float* t, lapack_int* ldt,\n                     lapack_complex_float* c, lapack_int* ldc,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb,\n                     const lapack_complex_double* v, lapack_int* ldv,\n                     const lapack_complex_double* t, lapack_int* ldt,\n                     lapack_complex_double* c, lapack_int* ldc,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_sgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, float* a,\n                    lapack_int* lda, float* t, lapack_int* ldt, float* work,\n                    lapack_int *info );\nvoid LAPACK_dgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, double* a,\n                    lapack_int* lda, double* t, lapack_int* ldt, double* work,\n                    lapack_int *info );\nvoid LAPACK_cgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_sgeqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* t, lapack_int* ldt, lapack_int 
*info );\nvoid LAPACK_dgeqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                     double* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_cgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_zgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_sgeqrt3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_dgeqrt3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                     double* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_cgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_zgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_stpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const float* v, lapack_int* ldv, const float* t,\n                     lapack_int* ldt, float* a, lapack_int* lda, float* b,\n                     lapack_int* ldb, float* work, lapack_int *info );\nvoid LAPACK_dtpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const double* v, lapack_int* ldv, const double* t,\n                     lapack_int* ldt, double* a, lapack_int* lda, double* b,\n                     lapack_int* ldb, double* work, lapack_int *info );\nvoid 
LAPACK_ctpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const lapack_complex_float* v, lapack_int* ldv,\n                     const lapack_complex_float* t, lapack_int* ldt,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_ztpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const lapack_complex_double* v, lapack_int* ldv,\n                     const lapack_complex_double* t, lapack_int* ldt,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_dtpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* t, lapack_int* ldt, double* work,\n                    lapack_int *info );\nvoid LAPACK_ctpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_ztpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_stpqrt2( lapack_int* m, lapack_int* n, lapack_int* l,\n                    
 float* a, lapack_int* lda,\n                     float* b, lapack_int* ldb,\n                     float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_dtpqrt2( lapack_int* m, lapack_int* n, lapack_int* l,\n                     double* a, lapack_int* lda,\n                     double* b, lapack_int* ldb,\n                     double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_ctpqrt2( lapack_int* m, lapack_int* n, lapack_int* l,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_ztpqrt2( lapack_int* m, lapack_int* n, lapack_int* l,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_stprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const float* v, lapack_int* ldv, const float* t,\n                    lapack_int* ldt, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, const float* work,\n                    lapack_int* ldwork );\nvoid LAPACK_dtprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const double* v, lapack_int* ldv, const double* t,\n                    lapack_int* ldt, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, const double* work,\n                    lapack_int* ldwork );\nvoid LAPACK_ctprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n       
             const lapack_complex_float* v, lapack_int* ldv,\n                    const lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    const float* work, lapack_int* ldwork );\nvoid LAPACK_ztprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const lapack_complex_double* v, lapack_int* ldv,\n                    const lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    const double* work, lapack_int* ldwork );\n// LAPACK 3.5.0\nvoid LAPACK_ssysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,\n                        lapack_int* lda, lapack_int* ipiv, float* b,\n                        lapack_int* ldb, float* work, lapack_int* lwork,\n                        lapack_int *info );\nvoid LAPACK_dsysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                        lapack_int* lda, lapack_int* ipiv, double* b,\n                        lapack_int* ldb, double* work, lapack_int* lwork,\n                        lapack_int *info );\nvoid LAPACK_csysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs,\n                        lapack_complex_float* a, lapack_int* lda,\n                        lapack_int* ipiv, lapack_complex_float* b,\n                        lapack_int* ldb, lapack_complex_float* work,\n                        lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zsysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs,\n                        lapack_complex_double* a, lapack_int* lda,\n                        lapack_int* ipiv, lapack_complex_double* b,\n                        lapack_int* ldb, lapack_complex_double* work,\n                    
    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_csyr( char* uplo, lapack_int* n, lapack_complex_float* alpha,\n                      const lapack_complex_float* x, lapack_int* incx,\n                      lapack_complex_float* a, lapack_int* lda );\nvoid LAPACK_zsyr( char* uplo, lapack_int* n, lapack_complex_double* alpha,\n                      const lapack_complex_double* x, lapack_int* incx,\n                      lapack_complex_double* a, lapack_int* lda );\nvoid LAPACK_ilaver( const lapack_int* vers_major, const lapack_int* vers_minor,\n                     const lapack_int* vers_patch );\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _LAPACKE_H_ */\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/include/lapacke_config.h",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK\n* Author: Intel Corporation\n* Generated May, 2011\n*****************************************************************************/\n\n#ifndef _LAPACKE_CONFIG_H_\n#define _LAPACKE_CONFIG_H_\n\n#ifdef __cplusplus\n#if defined(LAPACK_COMPLEX_CPP)\n#include <complex>\n#endif\nextern \"C\" {\n#endif /* __cplusplus */\n\n#include <stdlib.h>\n\n#ifndef lapack_int\n#if defined(LAPACK_ILP64)\n#define lapack_int              long\n#else\n#define lapack_int              int\n#endif\n#endif\n\n#ifndef lapack_logical\n#define lapack_logical          lapack_int\n#endif\n\n#ifndef LAPACK_COMPLEX_CUSTOM\n\n#if defined(LAPACK_COMPLEX_STRUCTURE)\n\ntypedef struct { float real, imag; } _lapack_complex_float;\ntypedef struct { double real, imag; } _lapack_complex_double;\n#define lapack_complex_float  _lapack_complex_float\n#define lapack_complex_double _lapack_complex_double\n#define lapack_complex_float_real(z)  ((z).real)\n#define lapack_complex_float_imag(z)  ((z).imag)\n#define lapack_complex_double_real(z)  ((z).real)\n#define lapack_complex_double_imag(z)  ((z).imag)\n\n#elif defined(LAPACK_COMPLEX_C99)\n\n#include <complex.h>\n#define lapack_complex_float    float _Complex\n#define lapack_complex_double   double _Complex\n#define lapack_complex_float_real(z)       (creal(z))\n#define lapack_complex_float_imag(z)       
(cimag(z))\n#define lapack_complex_double_real(z)       (creal(z))\n#define lapack_complex_double_imag(z)       (cimag(z))\n\n#elif defined(LAPACK_COMPLEX_CPP)\n\n#define lapack_complex_float std::complex<float>\n#define lapack_complex_double std::complex<double>\n#define lapack_complex_float_real(z)       ((z).real())\n#define lapack_complex_float_imag(z)       ((z).imag())\n#define lapack_complex_double_real(z)       ((z).real())\n#define lapack_complex_double_imag(z)       ((z).imag())\n\n#else\n\n#include <complex.h>\n#define lapack_complex_float    float _Complex\n#define lapack_complex_double   double _Complex\n#define lapack_complex_float_real(z)       (creal(z))\n#define lapack_complex_float_imag(z)       (cimag(z))\n#define lapack_complex_double_real(z)       (creal(z))\n#define lapack_complex_double_imag(z)       (cimag(z))\n\n#endif\n\nlapack_complex_float lapack_make_complex_float( float re, float im );\nlapack_complex_double lapack_make_complex_double( double re, double im );\n\n#endif\n\n#ifndef LAPACK_malloc\n#define LAPACK_malloc( size )   malloc( size )\n#endif\n\n#ifndef LAPACK_free\n#define LAPACK_free( p )        free( p )\n#endif\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _LAPACKE_CONFIG_H_ */\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/include/lapacke_mangling.h",
    "content": "#ifndef LAPACK_HEADER_INCLUDED\n#define LAPACK_HEADER_INCLUDED\n\n#ifndef LAPACK_GLOBAL\n#if defined(LAPACK_GLOBAL_PATTERN_LC) || defined(ADD_)\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname##_\n#elif defined(LAPACK_GLOBAL_PATTERN_UC) || defined(UPPER)\n#define LAPACK_GLOBAL(lcname,UCNAME)  UCNAME\n#elif defined(LAPACK_GLOBAL_PATTERN_MC) || defined(NOCHANGE)\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname\n#else\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname##_\n#endif\n#endif\n\n#endif\n\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/include/lapacke_mangling_with_flags.h",
    "content": "#ifndef LAPACK_HEADER_INCLUDED\n#define LAPACK_HEADER_INCLUDED\n\n#ifndef LAPACK_GLOBAL\n#if defined(LAPACK_GLOBAL_PATTERN_LC) || defined(ADD_)\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname##_\n#elif defined(LAPACK_GLOBAL_PATTERN_UC) || defined(UPPER)\n#define LAPACK_GLOBAL(lcname,UCNAME)  UCNAME\n#elif defined(LAPACK_GLOBAL_PATTERN_MC) || defined(NOCHANGE)\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname\n#else\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname##_\n#endif\n#endif\n\n#endif\n\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/include/lapacke_utils.h",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility functions\n* Author: Intel Corporation\n* Created in January, 2010\n*****************************************************************************/\n\n#ifndef _LAPACKE_UTILS_H_\n#define _LAPACKE_UTILS_H_\n\n#include \"lapacke.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n#ifndef ABS\n#define ABS(x) (((x) < 0) ? -(x) : (x))\n#endif\n#ifndef MAX\n#define MAX(x,y) (((x) > (y)) ? (x) : (y))\n#endif\n#ifndef MIN\n#define MIN(x,y) (((x) < (y)) ? (x) : (y))\n#endif\n#ifndef MAX3\n#define MAX3(x,y,z) (((x) > MAX(y,z)) ? (x) : MAX(y,z))\n#endif\n#ifndef MIN3\n#define MIN3(x,y,z) (((x) < MIN(y,z)) ? (x) : MIN(y,z))\n#endif\n\n#define IS_S_NONZERO(x) ( (x) < 0 || (x) > 0 )\n#define IS_D_NONZERO(x) ( (x) < 0 || (x) > 0 )\n#define IS_C_NONZERO(x) ( IS_S_NONZERO(*((float*)&x)) ||  \\\n                          IS_S_NONZERO(*(((float*)&x)+1)) )\n#define IS_Z_NONZERO(x) ( IS_D_NONZERO(*((double*)&x)) || \\\n                          IS_D_NONZERO(*(((double*)&x)+1)) )\n\n/* Error handler */\nvoid LAPACKE_xerbla( const char *name, lapack_int info );\n\n/* Compare two chars (case-insensitive) */\nlapack_logical LAPACKE_lsame( char ca,  char cb );\n\n/* Functions to convert column-major to row-major 2d arrays and vice versa. 
*/\nvoid LAPACKE_cgb_trans( int matrix_order, lapack_int m, lapack_int n,\n                        lapack_int kl, lapack_int ku,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_cge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const lapack_complex_float* in, lapack_int ldin,\n                        lapack_complex_float* out, lapack_int ldout );\nvoid LAPACKE_cgg_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const lapack_complex_float* in, lapack_int ldin,\n                        lapack_complex_float* out, lapack_int ldout );\nvoid LAPACKE_chb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_che_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_chp_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in,\n                        lapack_complex_float *out );\nvoid LAPACKE_chs_trans( int matrix_order, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_cpb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_cpf_trans( int matrix_order, char transr, char uplo,\n                        lapack_int n, const lapack_complex_float *in,\n                        lapack_complex_float 
*out );\nvoid LAPACKE_cpo_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_cpp_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in,\n                        lapack_complex_float *out );\nvoid LAPACKE_csp_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in,\n                        lapack_complex_float *out );\nvoid LAPACKE_csy_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_ctb_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, lapack_int kd,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\nvoid LAPACKE_ctf_trans( int matrix_order, char transr, char uplo, char diag,\n                        lapack_int n, const lapack_complex_float *in,\n                        lapack_complex_float *out );\nvoid LAPACKE_ctp_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, const lapack_complex_float *in,\n                        lapack_complex_float *out );\nvoid LAPACKE_ctr_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout );\n\nvoid LAPACKE_dgb_trans( int matrix_order, lapack_int m, lapack_int n,\n                        lapack_int kl, lapack_int ku,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\nvoid LAPACKE_dge_trans( int matrix_order, lapack_int m, lapack_int n,\n     
                   const double* in, lapack_int ldin,\n                        double* out, lapack_int ldout );\nvoid LAPACKE_dgg_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const double* in, lapack_int ldin,\n                        double* out, lapack_int ldout );\nvoid LAPACKE_dhs_trans( int matrix_order, lapack_int n,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\nvoid LAPACKE_dpb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\nvoid LAPACKE_dpf_trans( int matrix_order, char transr, char uplo,\n                        lapack_int n, const double *in,\n                        double *out );\nvoid LAPACKE_dpo_trans( int matrix_order, char uplo, lapack_int n,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\nvoid LAPACKE_dpp_trans( int matrix_order, char uplo, lapack_int n,\n                        const double *in,\n                        double *out );\nvoid LAPACKE_dsb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\nvoid LAPACKE_dsp_trans( int matrix_order, char uplo, lapack_int n,\n                        const double *in,\n                        double *out );\nvoid LAPACKE_dsy_trans( int matrix_order, char uplo, lapack_int n,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\nvoid LAPACKE_dtb_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, lapack_int kd,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout 
);\nvoid LAPACKE_dtf_trans( int matrix_order, char transr, char uplo, char diag,\n                        lapack_int n, const double *in,\n                        double *out );\nvoid LAPACKE_dtp_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, const double *in,\n                        double *out );\nvoid LAPACKE_dtr_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout );\n\nvoid LAPACKE_sgb_trans( int matrix_order, lapack_int m, lapack_int n,\n                        lapack_int kl, lapack_int ku,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_sge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const float* in, lapack_int ldin,\n                        float* out, lapack_int ldout );\nvoid LAPACKE_sgg_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const float* in, lapack_int ldin,\n                        float* out, lapack_int ldout );\nvoid LAPACKE_shs_trans( int matrix_order, lapack_int n,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_spb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_spf_trans( int matrix_order, char transr, char uplo,\n                        lapack_int n, const float *in,\n                        float *out );\nvoid LAPACKE_spo_trans( int matrix_order, char uplo, lapack_int n,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_spp_trans( int matrix_order, char uplo, lapack_int n,\n                        const 
float *in,\n                        float *out );\nvoid LAPACKE_ssb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_ssp_trans( int matrix_order, char uplo, lapack_int n,\n                        const float *in,\n                        float *out );\nvoid LAPACKE_ssy_trans( int matrix_order, char uplo, lapack_int n,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_stb_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, lapack_int kd,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\nvoid LAPACKE_stf_trans( int matrix_order, char transr, char uplo, char diag,\n                        lapack_int n, const float *in,\n                        float *out );\nvoid LAPACKE_stp_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, const float *in,\n                        float *out );\nvoid LAPACKE_str_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout );\n\nvoid LAPACKE_zgb_trans( int matrix_order, lapack_int m, lapack_int n,\n                        lapack_int kl, lapack_int ku,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_zge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const lapack_complex_double* in, lapack_int ldin,\n                        lapack_complex_double* out, lapack_int ldout );\nvoid LAPACKE_zgg_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const lapack_complex_double* in, 
lapack_int ldin,\n                        lapack_complex_double* out, lapack_int ldout );\nvoid LAPACKE_zhb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_zhe_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_zhp_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in,\n                        lapack_complex_double *out );\nvoid LAPACKE_zhs_trans( int matrix_order, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_zpb_trans( int matrix_order, char uplo, lapack_int n,\n                        lapack_int kd,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_zpf_trans( int matrix_order, char transr, char uplo,\n                        lapack_int n, const lapack_complex_double *in,\n                        lapack_complex_double *out );\nvoid LAPACKE_zpo_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_zpp_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in,\n                        lapack_complex_double *out );\nvoid LAPACKE_zsp_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in,\n                        lapack_complex_double *out );\nvoid 
LAPACKE_zsy_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_ztb_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, lapack_int kd,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\nvoid LAPACKE_ztf_trans( int matrix_order, char transr, char uplo, char diag,\n                        lapack_int n, const lapack_complex_double *in,\n                        lapack_complex_double *out );\nvoid LAPACKE_ztp_trans( int matrix_order, char uplo, char diag,\n                        lapack_int n, const lapack_complex_double *in,\n                        lapack_complex_double *out );\nvoid LAPACKE_ztr_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout );\n\n/* NaN checkers */\n#define LAPACK_SISNAN( x ) ( x != x )\n#define LAPACK_DISNAN( x ) ( x != x )\n#define LAPACK_CISNAN( x ) ( LAPACK_SISNAN(*((float*) &x)) || \\\n                              LAPACK_SISNAN(*(((float*) &x)+1)) )\n#define LAPACK_ZISNAN( x ) ( LAPACK_DISNAN(*((double*)&x)) || \\\n                              LAPACK_DISNAN(*(((double*)&x)+1)) )\n\n/* NaN checkers for vectors */\nlapack_logical LAPACKE_c_nancheck( lapack_int n,\n                                    const lapack_complex_float *x,\n                                    lapack_int incx );\nlapack_logical LAPACKE_d_nancheck( lapack_int n,\n                                    const double *x,\n                                    lapack_int incx );\nlapack_logical LAPACKE_s_nancheck( lapack_int n,\n                                    const float *x,\n                                    lapack_int incx 
);\nlapack_logical LAPACKE_z_nancheck( lapack_int n,\n                                    const lapack_complex_double *x,\n                                    lapack_int incx );\n/* NaN checkers for matrices */\nlapack_logical LAPACKE_cgb_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n, lapack_int kl,\n                                      lapack_int ku,\n                                      const lapack_complex_float *ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_cge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_cgg_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_cgt_nancheck( lapack_int n,\n                                      const lapack_complex_float *dl,\n                                      const lapack_complex_float *d,\n                                      const lapack_complex_float *du );\nlapack_logical LAPACKE_chb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const lapack_complex_float* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_che_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_chp_nancheck( lapack_int n,\n                                      const lapack_complex_float *ap );\nlapack_logical LAPACKE_chs_nancheck( int matrix_order, 
lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_cpb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const lapack_complex_float* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_cpf_nancheck( lapack_int n,\n                                      const lapack_complex_float *a );\nlapack_logical LAPACKE_cpo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_cpp_nancheck( lapack_int n,\n                                      const lapack_complex_float *ap );\nlapack_logical LAPACKE_cpt_nancheck( lapack_int n,\n                                      const float *d,\n                                      const lapack_complex_float *e );\nlapack_logical LAPACKE_csp_nancheck( lapack_int n,\n                                      const lapack_complex_float *ap );\nlapack_logical LAPACKE_cst_nancheck( lapack_int n,\n                                      const lapack_complex_float *d,\n                                      const lapack_complex_float *e );\nlapack_logical LAPACKE_csy_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_ctb_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n, lapack_int kd,\n                                      const lapack_complex_float* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_ctf_nancheck( int matrix_order, char transr,\n         
                             char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_float *a );\nlapack_logical LAPACKE_ctp_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_float *ap );\nlapack_logical LAPACKE_ctr_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda );\n\nlapack_logical LAPACKE_dgb_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n, lapack_int kl,\n                                      lapack_int ku,\n                                      const double *ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_dge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_dgg_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_dgt_nancheck( lapack_int n,\n                                      const double *dl,\n                                      const double *d,\n                                      const double *du );\nlapack_logical LAPACKE_dhs_nancheck( int matrix_order, lapack_int n,\n                                      const double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_dpb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      
const double* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_dpf_nancheck( lapack_int n,\n                                      const double *a );\nlapack_logical LAPACKE_dpo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_dpp_nancheck( lapack_int n,\n                                      const double *ap );\nlapack_logical LAPACKE_dpt_nancheck( lapack_int n,\n                                      const double *d,\n                                      const double *e );\nlapack_logical LAPACKE_dsb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const double* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_dsp_nancheck( lapack_int n,\n                                      const double *ap );\nlapack_logical LAPACKE_dst_nancheck( lapack_int n,\n                                      const double *d,\n                                      const double *e );\nlapack_logical LAPACKE_dsy_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_dtb_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n, lapack_int kd,\n                                      const double* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_dtf_nancheck( int matrix_order, char transr,\n                                      char uplo, char diag,\n                                      lapack_int n,\n                                      const double *a );\nlapack_logical LAPACKE_dtp_nancheck( int 
matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const double *ap );\nlapack_logical LAPACKE_dtr_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda );\n\nlapack_logical LAPACKE_sgb_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n, lapack_int kl,\n                                      lapack_int ku,\n                                      const float *ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_sge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_sgg_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_sgt_nancheck( lapack_int n,\n                                      const float *dl,\n                                      const float *d,\n                                      const float *du );\nlapack_logical LAPACKE_shs_nancheck( int matrix_order, lapack_int n,\n                                      const float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_spb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const float* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_spf_nancheck( lapack_int n,\n                                      const float *a );\nlapack_logical LAPACKE_spo_nancheck( int matrix_order, char uplo,\n 
                                     lapack_int n,\n                                      const float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_spp_nancheck( lapack_int n,\n                                      const float *ap );\nlapack_logical LAPACKE_spt_nancheck( lapack_int n,\n                                      const float *d,\n                                      const float *e );\nlapack_logical LAPACKE_ssb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const float* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_ssp_nancheck( lapack_int n,\n                                      const float *ap );\nlapack_logical LAPACKE_sst_nancheck( lapack_int n,\n                                      const float *d,\n                                      const float *e );\nlapack_logical LAPACKE_ssy_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const float *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_stb_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n, lapack_int kd,\n                                      const float* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_stf_nancheck( int matrix_order, char transr,\n                                      char uplo, char diag,\n                                      lapack_int n,\n                                      const float *a );\nlapack_logical LAPACKE_stp_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const float *ap );\nlapack_logical LAPACKE_str_nancheck( int matrix_order, char uplo, char diag,\n                                      
lapack_int n,\n                                      const float *a,\n                                      lapack_int lda );\n\nlapack_logical LAPACKE_zgb_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n, lapack_int kl,\n                                      lapack_int ku,\n                                      const lapack_complex_double *ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_zge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_zgg_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_zgt_nancheck( lapack_int n,\n                                      const lapack_complex_double *dl,\n                                      const lapack_complex_double *d,\n                                      const lapack_complex_double *du );\nlapack_logical LAPACKE_zhb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const lapack_complex_double* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_zhe_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_zhp_nancheck( lapack_int n,\n                                      const lapack_complex_double *ap );\nlapack_logical LAPACKE_zhs_nancheck( int matrix_order, lapack_int n,\n                                      const lapack_complex_double 
*a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_zpb_nancheck( int matrix_order, char uplo,\n                                      lapack_int n, lapack_int kd,\n                                      const lapack_complex_double* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_zpf_nancheck( lapack_int n,\n                                      const lapack_complex_double *a );\nlapack_logical LAPACKE_zpo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_zpp_nancheck( lapack_int n,\n                                      const lapack_complex_double *ap );\nlapack_logical LAPACKE_zpt_nancheck( lapack_int n,\n                                      const double *d,\n                                      const lapack_complex_double *e );\nlapack_logical LAPACKE_zsp_nancheck( lapack_int n,\n                                      const lapack_complex_double *ap );\nlapack_logical LAPACKE_zst_nancheck( lapack_int n,\n                                      const lapack_complex_double *d,\n                                      const lapack_complex_double *e );\nlapack_logical LAPACKE_zsy_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda );\nlapack_logical LAPACKE_ztb_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n, lapack_int kd,\n                                      const lapack_complex_double* ab,\n                                      lapack_int ldab );\nlapack_logical LAPACKE_ztf_nancheck( int matrix_order, char transr,\n                                      char uplo, char diag,\n                 
                     lapack_int n,\n                                      const lapack_complex_double *a );\nlapack_logical LAPACKE_ztp_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_double *ap );\nlapack_logical LAPACKE_ztr_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda );\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif  /* _LAPACKE_UTILS_H_ */\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgeev.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* w, lapack_complex_float* vl,\n                          lapack_int ldvl, lapack_complex_float* vr,\n                          lapack_int ldvr )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* rwork = NULL;\n    lapack_complex_float* work = NULL;\n    lapack_complex_float work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgeev\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Allocate memory for working array(s) */\n    rwork = (float*)LAPACKE_malloc( sizeof(float) * MAX(1,2*n) );\n    if( rwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Query optimal working array(s) size */\n    info = LAPACKE_cgeev_work( 
matrix_order, jobvl, jobvr, n, a, lda, w, vl,\n                               ldvl, vr, ldvr, &work_query, lwork, rwork );\n    if( info != 0 ) {\n        goto exit_level_1;\n    }\n    lwork = LAPACK_C2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_float*)\n        LAPACKE_malloc( sizeof(lapack_complex_float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_cgeev_work( matrix_order, jobvl, jobvr, n, a, lda, w, vl,\n                               ldvl, vr, ldvr, work, lwork, rwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_1:\n    LAPACKE_free( rwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgeev\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgeev_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* w,\n                               lapack_complex_float* vl, lapack_int ldvl,\n                               lapack_complex_float* vr, lapack_int ldvr,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgeev( &jobvl, &jobvr, &n, a, &lda, w, vl, &ldvl, vr, &ldvr,\n                      work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldvl_t = MAX(1,n);\n        lapack_int ldvr_t = MAX(1,n);\n        lapack_complex_float* a_t = NULL;\n        lapack_complex_float* vl_t = NULL;\n        lapack_complex_float* vr_t = NULL;\n        /* Check leading 
dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_cgeev_work\", info );\n            return info;\n        }\n        if( ldvl < n ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_cgeev_work\", info );\n            return info;\n        }\n        if( ldvr < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_cgeev_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_cgeev( &jobvl, &jobvr, &n, a, &lda_t, w, vl, &ldvl_t, vr,\n                          &ldvr_t, work, &lwork, rwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            vl_t = (lapack_complex_float*)\n                LAPACKE_malloc( sizeof(lapack_complex_float) *\n                                ldvl_t * MAX(1,n) );\n            if( vl_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            vr_t = (lapack_complex_float*)\n                LAPACKE_malloc( sizeof(lapack_complex_float) *\n                                ldvr_t * MAX(1,n) );\n            if( vr_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgeev( &jobvl, &jobvr, &n, a_t, &lda_t, w, vl_t, 
&ldvl_t, vr_t,\n                      &ldvr_t, work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, vl_t, ldvl_t, vl, ldvl );\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, vr_t, ldvr_t, vr, ldvr );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_free( vr_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_free( vl_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgeev_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgeev_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgeqrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgeqrf( int matrix_layout, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_complex_float* work = NULL;\n    lapack_complex_float work_query;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgeqrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_layout, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_cgeqrf_work( matrix_layout, m, n, a, lda, tau, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = LAPACK_C2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_float*)\n        LAPACKE_malloc( sizeof(lapack_complex_float) * lwork );\n    if( work == NULL ) {\n        info = 
LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_cgeqrf_work( matrix_layout, m, n, a, lda, tau, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgeqrf\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgeqrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgeqrf_work( int matrix_layout, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgeqrf( &m, &n, a, &lda, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        lapack_complex_float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_cgeqrf_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_cgeqrf( &m, &n, a, &lda_t, tau, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_layout, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgeqrf( &m, &n, a_t, &lda_t, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgeqrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgeqrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgesdd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float* s, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* vt,\n                           lapack_int ldvt )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    /* Additional scalars declarations for work arrays */\n    size_t lrwork;\n    lapack_int* iwork = NULL;\n    float* rwork = NULL;\n    lapack_complex_float* work = NULL;\n    lapack_complex_float work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgesdd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Additional scalars initializations for work arrays */\n    if( LAPACKE_lsame( jobz, 'n' ) ) {\n        lrwork = MAX(1,5*MIN(m,n));\n    } else {\n        lrwork = 
(size_t)5*MAX(1,MIN(m,n))*MAX(1,MIN(m,n))+7*MIN(m,n);\n    }\n    /* Allocate memory for working array(s) */\n    iwork = (lapack_int*)\n        LAPACKE_malloc( sizeof(lapack_int) * MAX(1,8*MIN(m,n)) );\n    if( iwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    rwork = (float*)LAPACKE_malloc( sizeof(float) * lrwork );\n    if( rwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Query optimal working array(s) size */\n    info = LAPACKE_cgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                                ldvt, &work_query, lwork, rwork, iwork );\n    if( info != 0 ) {\n        goto exit_level_2;\n    }\n    lwork = LAPACK_C2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_float*)\n        LAPACKE_malloc( sizeof(lapack_complex_float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_2;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_cgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                                ldvt, work, lwork, rwork, iwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_2:\n    LAPACKE_free( rwork );\nexit_level_1:\n    LAPACKE_free( iwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgesdd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgesdd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, float* s,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* vt, lapack_int ldvt,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgesdd( &jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work,\n                       &lwork, rwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             LAPACKE_lsame( jobz, 's' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? 
m : 1;\n        lapack_int ncols_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? m :\n                             ( LAPACKE_lsame( jobz, 's' ) ? MIN(m,n) : 1);\n        lapack_int nrows_vt = ( LAPACKE_lsame( jobz, 'a' ) ||\n                              ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? n :\n                              ( LAPACKE_lsame( jobz, 's' ) ? MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        lapack_complex_float* a_t = NULL;\n        lapack_complex_float* u_t = NULL;\n        lapack_complex_float* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_cgesdd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_cgesdd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_cgesdd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_cgesdd( &jobz, &m, &n, a, &lda_t, s, u, &ldu_t, vt, &ldvt_t,\n                           work, &lwork, rwork, iwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            u_t = (lapack_complex_float*)\n                LAPACKE_malloc( sizeof(lapack_complex_float) *\n                                ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            vt_t = (lapack_complex_float*)\n                LAPACKE_malloc( sizeof(lapack_complex_float) *\n                                ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgesdd( &jobz, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, rwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_cge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( 
jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_cge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n                               ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgesdd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgesdd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgesvd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float* s, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* vt,\n                           lapack_int ldvt, float* superb )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* rwork = NULL;\n    lapack_complex_float* work = NULL;\n    lapack_complex_float work_query;\n    lapack_int i;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgesvd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -6;\n    }\n#endif\n    /* Allocate memory for working array(s) */\n    rwork = (float*)LAPACKE_malloc( sizeof(float) * MAX(1,5*MIN(m,n)) );\n    if( rwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Query optimal working 
array(s) size */\n    info = LAPACKE_cgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, &work_query, lwork, rwork );\n    if( info != 0 ) {\n        goto exit_level_1;\n    }\n    lwork = LAPACK_C2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_float*)\n        LAPACKE_malloc( sizeof(lapack_complex_float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_cgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, work, lwork, rwork );\n    /* Backup significant data from working array(s) */\n    for( i=0; i<MIN(m,n)-1; i++ ) {\n        superb[i] = rwork[i];\n    }\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_1:\n    LAPACKE_free( rwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgesvd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgesvd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float* s, lapack_complex_float* u,\n                                lapack_int ldu, lapack_complex_float* vt,\n                                lapack_int ldvt, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgesvd( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,\n                       work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) ||\n                             LAPACKE_lsame( jobu, 's' ) ) ? m : 1;\n        lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m :\n                             ( LAPACKE_lsame( jobu, 's' ) ? 
MIN(m,n) : 1);\n        lapack_int nrows_vt = LAPACKE_lsame( jobvt, 'a' ) ? n :\n                              ( LAPACKE_lsame( jobvt, 's' ) ? MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        lapack_complex_float* a_t = NULL;\n        lapack_complex_float* u_t = NULL;\n        lapack_complex_float* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -7;\n            LAPACKE_xerbla( \"LAPACKE_cgesvd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -10;\n            LAPACKE_xerbla( \"LAPACKE_cgesvd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -12;\n            LAPACKE_xerbla( \"LAPACKE_cgesvd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_cgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt,\n                           &ldvt_t, work, &lwork, rwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            u_t = (lapack_complex_float*)\n                LAPACKE_malloc( sizeof(lapack_complex_float) *\n                                ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            vt_t = (lapack_complex_float*)\n                LAPACKE_malloc( sizeof(lapack_complex_float) *\n                                ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgesvd( &jobu, &jobvt, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_cge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_cge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n                               ldvt );\n 
       }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgesvd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgesvd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgetrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_cgetrf_work( matrix_order, m, n, a, lda, ipiv );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgetrf( &m, &n, a, &lda, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        lapack_complex_float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_cgetrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( 
matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgetrf( &m, &n, a_t, &lda_t, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgetrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgetrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgetri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgetri( int matrix_order, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_complex_float* work = NULL;\n    lapack_complex_float work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgetri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -3;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_cgetri_work( matrix_order, n, a, lda, ipiv, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = LAPACK_C2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_float*)\n        LAPACKE_malloc( sizeof(lapack_complex_float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n      
  goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_cgetri_work( matrix_order, n, a, lda, ipiv, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgetri\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgetri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgetri_work( int matrix_order, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgetri( &n, a, &lda, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_complex_float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -4;\n            LAPACKE_xerbla( \"LAPACKE_cgetri_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_cgetri( &n, a, &lda_t, ipiv, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgetri( &n, a_t, &lda_t, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgetri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgetri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_float* b, lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cgetrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_cge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -8;\n    }\n#endif\n    return LAPACKE_cgetrs_work( matrix_order, trans, n, nrhs, a, lda, ipiv, b,\n                                ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgetrs( &trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        lapack_complex_float* a_t = NULL;\n        lapack_complex_float* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_cgetrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_cgetrs_work\", info );\n            return info;\n        }\n        /* 
Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) *\n                            ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        LAPACKE_cge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cgetrs( &trans, &n, &nrhs, a_t, &lda_t, ipiv, b_t, &ldb_t,\n                       &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cgetrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cgetrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cpotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cpotrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cpotrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_cpotrf_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cpotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cpotrf( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_complex_float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_cpotrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        /* 
Call LAPACK function and adjust info */\n        LAPACK_cpotrf( &uplo, &n, a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cpo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cpotrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cpotrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cpotri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cpotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cpotri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cpotri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_cpotri_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cpotri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cpotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cpotri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cpotri( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_complex_float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_cpotri_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        /* 
Call LAPACK function and adjust info */\n        LAPACK_cpotri( &uplo, &n, a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cpo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cpotri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cpotri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cpotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cpotrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_cpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_cge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -7;\n    }\n#endif\n    return LAPACKE_cpotrs_work( matrix_order, uplo, n, nrhs, a, lda, b, ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cpotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cpotrs( &uplo, &n, &nrhs, a, &lda, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        lapack_complex_float* a_t = NULL;\n        lapack_complex_float* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_cpotrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_cpotrs_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary 
array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) *\n                            ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        LAPACKE_cge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cpotrs( &uplo, &n, &nrhs, a_t, &lda_t, b_t, &ldb_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cpotrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cpotrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cunmqr.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function cunmqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cunmqr( int matrix_layout, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_complex_float* work = NULL;\n    lapack_complex_float work_query;\n    lapack_int r;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_cunmqr\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n    if( LAPACKE_cge_nancheck( matrix_layout, r, k, a, lda ) ) {\n        return -7;\n    }\n    if( LAPACKE_cge_nancheck( matrix_layout, m, n, c, ldc ) ) {\n        return -10;\n    }\n    if( LAPACKE_c_nancheck( k, tau, 1 ) ) {\n        return -9;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_cunmqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = LAPACK_C2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_float*)\n        LAPACKE_malloc( sizeof(lapack_complex_float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_cunmqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_cunmqr\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_cunmqr_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function cunmqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_cunmqr_work( int matrix_layout, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_cunmqr( &side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work,\n                       &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        lapack_int r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n        lapack_int lda_t = MAX(1,r);\n        lapack_int ldc_t = MAX(1,m);\n        lapack_complex_float* a_t = NULL;\n        lapack_complex_float* c_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < k ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_cunmqr_work\", info );\n            return info;\n        }\n        if( ldc < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_cunmqr_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_cunmqr( &side, &trans, &m, &n, &k, a, &lda_t, tau, c, &ldc_t,\n                           work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,k) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        c_t = (lapack_complex_float*)\n            LAPACKE_malloc( sizeof(lapack_complex_float) * ldc_t * MAX(1,n) );\n        if( c_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_cge_trans( matrix_layout, r, k, a, lda, a_t, lda_t );\n        LAPACKE_cge_trans( matrix_layout, m, n, c, ldc, c_t, ldc_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_cunmqr( &side, &trans, &m, &n, &k, a_t, &lda_t, tau, c_t, &ldc_t,\n                       work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_cge_trans( LAPACK_COL_MAJOR, m, n, c_t, ldc_t, c, ldc );\n        /* Release memory and exit */\n        LAPACKE_free( c_t );\nexit_level_1:\n        LAPACKE_free( a_t 
);\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_cunmqr_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_cunmqr_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgeev.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, double* a, lapack_int lda, double* wr,\n                          double* wi, double* vl, lapack_int ldvl, double* vr,\n                          lapack_int ldvr )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* work = NULL;\n    double work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgeev\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_dgeev_work( matrix_order, jobvl, jobvr, n, a, lda, wr, wi,\n                               vl, ldvl, vr, ldvr, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (double*)LAPACKE_malloc( sizeof(double) * lwork );\n    if( work == 
NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_dgeev_work( matrix_order, jobvl, jobvr, n, a, lda, wr, wi,\n                               vl, ldvl, vr, ldvr, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgeev\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgeev_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* wr, double* wi, double* vl,\n                               lapack_int ldvl, double* vr, lapack_int ldvr,\n                               double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgeev( &jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr,\n                      work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldvl_t = MAX(1,n);\n        lapack_int ldvr_t = MAX(1,n);\n        double* a_t = NULL;\n        double* vl_t = NULL;\n        double* vr_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_dgeev_work\", info );\n            return info;\n        }\n        if( ldvl 
< n ) {\n            info = -10;\n            LAPACKE_xerbla( \"LAPACKE_dgeev_work\", info );\n            return info;\n        }\n        if( ldvr < n ) {\n            info = -12;\n            LAPACKE_xerbla( \"LAPACKE_dgeev_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_dgeev( &jobvl, &jobvr, &n, a, &lda_t, wr, wi, vl, &ldvl_t,\n                          vr, &ldvr_t, work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            vl_t = (double*)\n                LAPACKE_malloc( sizeof(double) * ldvl_t * MAX(1,n) );\n            if( vl_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            vr_t = (double*)\n                LAPACKE_malloc( sizeof(double) * ldvr_t * MAX(1,n) );\n            if( vr_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgeev( &jobvl, &jobvr, &n, a_t, &lda_t, wr, wi, vl_t, &ldvl_t,\n                      vr_t, &ldvr_t, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_dge_trans( 
LAPACK_COL_MAJOR, n, n, vl_t, ldvl_t, vl, ldvl );\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, vr_t, ldvr_t, vr, ldvr );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_free( vr_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_free( vl_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgeev_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgeev_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgeqrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgeqrf( int matrix_layout, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* work = NULL;\n    double work_query;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgeqrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_layout, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_dgeqrf_work( matrix_layout, m, n, a, lda, tau, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (double*)LAPACKE_malloc( sizeof(double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_dgeqrf_work( 
matrix_layout, m, n, a, lda, tau, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgeqrf\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgeqrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgeqrf_work( int matrix_layout, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgeqrf( &m, &n, a, &lda, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_dgeqrf_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_dgeqrf( &m, &n, a, &lda_t, tau, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_layout, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgeqrf( &m, &n, a_t, &lda_t, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgeqrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgeqrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgesdd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, double* a, lapack_int lda, double* s,\n                           double* u, lapack_int ldu, double* vt,\n                           lapack_int ldvt )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_int* iwork = NULL;\n    double* work = NULL;\n    double work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgesdd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Allocate memory for working array(s) */\n    iwork = (lapack_int*)\n        LAPACKE_malloc( sizeof(lapack_int) * MAX(1,8*MIN(m,n)) );\n    if( iwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Query optimal working array(s) size */\n    info = LAPACKE_dgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                       
         ldvt, &work_query, lwork, iwork );\n    if( info != 0 ) {\n        goto exit_level_1;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (double*)LAPACKE_malloc( sizeof(double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_dgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                                ldvt, work, lwork, iwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_1:\n    LAPACKE_free( iwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgesdd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgesdd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* s, double* u, lapack_int ldu,\n                                double* vt, lapack_int ldvt, double* work,\n                                lapack_int lwork, lapack_int* iwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgesdd( &jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work,\n                       &lwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             LAPACKE_lsame( jobz, 's' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? m : 1;\n        lapack_int ncols_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? m :\n                             ( LAPACKE_lsame( jobz, 's' ) ? 
MIN(m,n) : 1);\n        lapack_int nrows_vt = ( LAPACKE_lsame( jobz, 'a' ) ||\n                              ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? n :\n                              ( LAPACKE_lsame( jobz, 's' ) ? MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        double* a_t = NULL;\n        double* u_t = NULL;\n        double* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_dgesdd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_dgesdd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_dgesdd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_dgesdd( &jobz, &m, &n, a, &lda_t, s, u, &ldu_t, vt, &ldvt_t,\n                           work, &lwork, iwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            u_t = (double*)\n                LAPACKE_malloc( sizeof(double) * ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            vt_t = (double*)\n                LAPACKE_malloc( sizeof(double) * ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgesdd( &jobz, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_dge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_dge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, 
ldvt_t, vt,\n                               ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgesdd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgesdd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgesvd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, double* a,\n                           lapack_int lda, double* s, double* u, lapack_int ldu,\n                           double* vt, lapack_int ldvt, double* superb )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* work = NULL;\n    double work_query;\n    lapack_int i;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgesvd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -6;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_dgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (double*)LAPACKE_malloc( 
sizeof(double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_dgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, work, lwork );\n    /* Backup significant data from working array(s) */\n    for( i=0; i<MIN(m,n)-1; i++ ) {\n        superb[i] = work[i+1];\n    }\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgesvd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgesvd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n, double* a,\n                                lapack_int lda, double* s, double* u,\n                                lapack_int ldu, double* vt, lapack_int ldvt,\n                                double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgesvd( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,\n                       work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) ||\n                             LAPACKE_lsame( jobu, 's' ) ) ? m : 1;\n        lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m :\n                             ( LAPACKE_lsame( jobu, 's' ) ? MIN(m,n) : 1);\n        lapack_int nrows_vt = LAPACKE_lsame( jobvt, 'a' ) ? n :\n                              ( LAPACKE_lsame( jobvt, 's' ) ? 
MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        double* a_t = NULL;\n        double* u_t = NULL;\n        double* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -7;\n            LAPACKE_xerbla( \"LAPACKE_dgesvd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -10;\n            LAPACKE_xerbla( \"LAPACKE_dgesvd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -12;\n            LAPACKE_xerbla( \"LAPACKE_dgesvd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_dgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt,\n                           &ldvt_t, work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            u_t = (double*)\n                LAPACKE_malloc( sizeof(double) * ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            vt_t = (double*)\n                LAPACKE_malloc( sizeof(double) * ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        
LAPACKE_dge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgesvd( &jobu, &jobvt, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_dge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_dge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n                               ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgesvd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgesvd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* ipiv )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgetrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_dgetrf_work( matrix_order, m, n, a, lda, ipiv );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgetrf( &m, &n, a, &lda, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_dgetrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        
LAPACK_dgetrf( &m, &n, a_t, &lda_t, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgetrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgetrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgetri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgetri( int matrix_order, lapack_int n, double* a,\n                           lapack_int lda, const lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* work = NULL;\n    double work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgetri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -3;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_dgetri_work( matrix_order, n, a, lda, ipiv, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (double*)LAPACKE_malloc( sizeof(double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_dgetri_work( 
matrix_order, n, a, lda, ipiv, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgetri\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgetri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgetri_work( int matrix_order, lapack_int n, double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgetri( &n, a, &lda, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -4;\n            LAPACKE_xerbla( \"LAPACKE_dgetri_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_dgetri( &n, a, &lda_t, ipiv, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgetri( &n, a_t, &lda_t, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgetri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgetri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double* b, lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dgetrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_dge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -8;\n    }\n#endif\n    return LAPACKE_dgetrs_work( matrix_order, trans, n, nrhs, a, lda, ipiv, b,\n                                ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* b, lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgetrs( &trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        double* a_t = NULL;\n        double* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_dgetrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_dgetrs_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t 
= (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (double*)LAPACKE_malloc( sizeof(double) * ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        LAPACKE_dge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dgetrs( &trans, &n, &nrhs, a_t, &lda_t, ipiv, b_t, &ldb_t,\n                       &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dgetrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dgetrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dormqr.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dormqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dormqr( int matrix_layout, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* work = NULL;\n    double work_query;\n    lapack_int r;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dormqr\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n    if( LAPACKE_dge_nancheck( matrix_layout, r, k, a, lda ) ) {\n        return -7;\n    }\n    if( LAPACKE_dge_nancheck( matrix_layout, m, n, c, ldc ) ) {\n        return -10;\n    }\n    if( LAPACKE_d_nancheck( k, tau, 1 ) ) {\n        return -9;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_dormqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (double*)LAPACKE_malloc( sizeof(double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_dormqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_dormqr\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dormqr_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dormqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dormqr_work( int matrix_layout, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    lapack_int r;\n    lapack_int lda_t, ldc_t;\n    double *a_t = NULL, *c_t = NULL;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dormqr( &side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work,\n                       &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n        lda_t = MAX(1,r);\n        ldc_t = MAX(1,m);\n        /* Check leading dimension(s) */\n        if( lda < k ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_dormqr_work\", info );\n            return info;\n        }\n        if( ldc < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_dormqr_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_dormqr( &side, &trans, &m, &n, &k, a, &lda_t, tau, c, &ldc_t,\n                           work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,k) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        c_t = (double*)LAPACKE_malloc( sizeof(double) * ldc_t * MAX(1,n) );\n        if( c_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dge_trans( matrix_layout, r, k, a, lda, a_t, lda_t );\n        LAPACKE_dge_trans( matrix_layout, m, n, c, ldc, c_t, ldc_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dormqr( &side, &trans, &m, &n, &k, a_t, &lda_t, tau, c_t, &ldc_t,\n                       work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, c_t, ldc_t, c, ldc );\n        /* Release memory and exit */\n        LAPACKE_free( c_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dormqr_work\", info );\n        }\n    } else {\n        info = -1;\n        
LAPACKE_xerbla( \"LAPACKE_dormqr_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dpotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dpotrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dpotrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_dpotrf_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dpotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dpotrf( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_dpotrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dpotrf( &uplo, 
&n, a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dpo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dpotrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dpotrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dpotri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dpotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dpotri( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dpotri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_dpotri_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dpotri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dpotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dpotri_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dpotri( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_dpotri_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dpotri( &uplo, 
&n, a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dpo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dpotri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dpotri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function dpotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           double* b, lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_dpotrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_dpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_dge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -7;\n    }\n#endif\n    return LAPACKE_dpotrs_work( matrix_order, uplo, n, nrhs, a, lda, b, ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function dpotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_dpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, double* b, lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_dpotrs( &uplo, &n, &nrhs, a, &lda, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        double* a_t = NULL;\n        double* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_dpotrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_dpotrs_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) );\n 
       if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (double*)LAPACKE_malloc( sizeof(double) * ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_dpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        LAPACKE_dge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_dpotrs( &uplo, &n, &nrhs, a_t, &lda_t, b_t, &ldb_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_dpotrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_dpotrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgeev.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, float* a, lapack_int lda, float* wr,\n                          float* wi, float* vl, lapack_int ldvl, float* vr,\n                          lapack_int ldvr )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* work = NULL;\n    float work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgeev\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_sgeev_work( matrix_order, jobvl, jobvr, n, a, lda, wr, wi,\n                               vl, ldvl, vr, ldvr, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (float*)LAPACKE_malloc( sizeof(float) * lwork );\n    if( work == NULL ) {\n   
     info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_sgeev_work( matrix_order, jobvl, jobvr, n, a, lda, wr, wi,\n                               vl, ldvl, vr, ldvr, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgeev\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgeev_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, float* a, lapack_int lda,\n                               float* wr, float* wi, float* vl, lapack_int ldvl,\n                               float* vr, lapack_int ldvr, float* work,\n                               lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgeev( &jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr,\n                      work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldvl_t = MAX(1,n);\n        lapack_int ldvr_t = MAX(1,n);\n        float* a_t = NULL;\n        float* vl_t = NULL;\n        float* vr_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_sgeev_work\", info );\n            return info;\n        }\n        if( ldvl < n ) 
{\n            info = -10;\n            LAPACKE_xerbla( \"LAPACKE_sgeev_work\", info );\n            return info;\n        }\n        if( ldvr < n ) {\n            info = -12;\n            LAPACKE_xerbla( \"LAPACKE_sgeev_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_sgeev( &jobvl, &jobvr, &n, a, &lda_t, wr, wi, vl, &ldvl_t,\n                          vr, &ldvr_t, work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            vl_t = (float*)LAPACKE_malloc( sizeof(float) * ldvl_t * MAX(1,n) );\n            if( vl_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            vr_t = (float*)LAPACKE_malloc( sizeof(float) * ldvr_t * MAX(1,n) );\n            if( vr_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgeev( &jobvl, &jobvr, &n, a_t, &lda_t, wr, wi, vl_t, &ldvl_t,\n                      vr_t, &ldvr_t, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, vl_t, ldvl_t, vl, ldvl );\n        
}\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, vr_t, ldvr_t, vr, ldvr );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_free( vr_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_free( vl_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgeev_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgeev_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgeqrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgeqrf( int matrix_layout, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* work = NULL;\n    float work_query;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgeqrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_layout, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_sgeqrf_work( matrix_layout, m, n, a, lda, tau, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (float*)LAPACKE_malloc( sizeof(float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_sgeqrf_work( 
matrix_layout, m, n, a, lda, tau, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgeqrf\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgeqrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgeqrf_work( int matrix_layout, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgeqrf( &m, &n, a, &lda, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_sgeqrf_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_sgeqrf( &m, &n, a, &lda_t, tau, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_layout, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgeqrf( &m, &n, a_t, &lda_t, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgeqrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgeqrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgesdd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, float* a, lapack_int lda, float* s,\n                           float* u, lapack_int ldu, float* vt,\n                           lapack_int ldvt )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_int* iwork = NULL;\n    float* work = NULL;\n    float work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgesdd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Allocate memory for working array(s) */\n    iwork = (lapack_int*)\n        LAPACKE_malloc( sizeof(lapack_int) * MAX(1,8*MIN(m,n)) );\n    if( iwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Query optimal working array(s) size */\n    info = LAPACKE_sgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                             
   ldvt, &work_query, lwork, iwork );\n    if( info != 0 ) {\n        goto exit_level_1;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (float*)LAPACKE_malloc( sizeof(float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_sgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                                ldvt, work, lwork, iwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_1:\n    LAPACKE_free( iwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgesdd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgesdd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* s, float* u, lapack_int ldu, float* vt,\n                                lapack_int ldvt, float* work, lapack_int lwork,\n                                lapack_int* iwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgesdd( &jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work,\n                       &lwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             LAPACKE_lsame( jobz, 's' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? m : 1;\n        lapack_int ncols_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? m :\n                             ( LAPACKE_lsame( jobz, 's' ) ? 
MIN(m,n) : 1);\n        lapack_int nrows_vt = ( LAPACKE_lsame( jobz, 'a' ) ||\n                              ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? n :\n                              ( LAPACKE_lsame( jobz, 's' ) ? MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        float* a_t = NULL;\n        float* u_t = NULL;\n        float* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_sgesdd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_sgesdd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_sgesdd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_sgesdd( &jobz, &m, &n, a, &lda_t, s, u, &ldu_t, vt, &ldvt_t,\n                           work, &lwork, iwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            u_t = (float*)\n                LAPACKE_malloc( sizeof(float) * ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            vt_t = (float*)LAPACKE_malloc( sizeof(float) * ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgesdd( &jobz, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n               
                ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgesdd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgesdd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgesvd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, float* a, lapack_int lda,\n                           float* s, float* u, lapack_int ldu, float* vt,\n                           lapack_int ldvt, float* superb )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* work = NULL;\n    float work_query;\n    lapack_int i;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgesvd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -6;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_sgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (float*)LAPACKE_malloc( sizeof(float) * 
lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_sgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, work, lwork );\n    /* Backup significant data from working array(s) */\n    for( i=0; i<MIN(m,n)-1; i++ ) {\n        superb[i] = work[i+1];\n    }\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgesvd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgesvd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* s, float* u,\n                                lapack_int ldu, float* vt, lapack_int ldvt,\n                                float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgesvd( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,\n                       work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) ||\n                             LAPACKE_lsame( jobu, 's' ) ) ? m : 1;\n        lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m :\n                             ( LAPACKE_lsame( jobu, 's' ) ? MIN(m,n) : 1);\n        lapack_int nrows_vt = LAPACKE_lsame( jobvt, 'a' ) ? n :\n                              ( LAPACKE_lsame( jobvt, 's' ) ? 
MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        float* a_t = NULL;\n        float* u_t = NULL;\n        float* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -7;\n            LAPACKE_xerbla( \"LAPACKE_sgesvd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -10;\n            LAPACKE_xerbla( \"LAPACKE_sgesvd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -12;\n            LAPACKE_xerbla( \"LAPACKE_sgesvd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_sgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt,\n                           &ldvt_t, work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            u_t = (float*)\n                LAPACKE_malloc( sizeof(float) * ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            vt_t = (float*)LAPACKE_malloc( sizeof(float) * ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_order, m, n, a, 
lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgesvd( &jobu, &jobvt, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n                               ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgesvd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgesvd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* ipiv )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgetrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_sgetrf_work( matrix_order, m, n, a, lda, ipiv );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgetrf( &m, &n, a, &lda, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_sgetrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        
LAPACK_sgetrf( &m, &n, a_t, &lda_t, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgetrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgetrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgetri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgetri( int matrix_order, lapack_int n, float* a,\n                           lapack_int lda, const lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* work = NULL;\n    float work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgetri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -3;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_sgetri_work( matrix_order, n, a, lda, ipiv, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (float*)LAPACKE_malloc( sizeof(float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_sgetri_work( matrix_order, n, 
a, lda, ipiv, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgetri\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgetri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgetri_work( int matrix_order, lapack_int n, float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgetri( &n, a, &lda, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -4;\n            LAPACKE_xerbla( \"LAPACKE_sgetri_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_sgetri( &n, a, &lda_t, ipiv, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgetri( &n, a_t, &lda_t, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgetri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgetri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float* b, lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sgetrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_sge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_sge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -8;\n    }\n#endif\n    return LAPACKE_sgetrs_work( matrix_order, trans, n, nrhs, a, lda, ipiv, b,\n                                ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgetrs( &trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        float* a_t = NULL;\n        float* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_sgetrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_sgetrs_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = 
(float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (float*)LAPACKE_malloc( sizeof(float) * ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        LAPACKE_sge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sgetrs( &trans, &n, &nrhs, a_t, &lda_t, ipiv, b_t, &ldb_t,\n                       &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sgetrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_sgetrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sormqr.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function sormqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sormqr( int matrix_layout, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    float* work = NULL;\n    float work_query;\n    lapack_int r;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_sormqr\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n    if( LAPACKE_sge_nancheck( matrix_layout, r, k, a, lda ) ) {\n        return -7;\n    }\n    if( LAPACKE_sge_nancheck( matrix_layout, m, n, c, ldc ) ) {\n        return -10;\n    }\n    if( LAPACKE_s_nancheck( k, tau, 1 ) ) {\n        return -9;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_sormqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = (lapack_int)work_query;\n    /* Allocate memory for work arrays */\n    work = (float*)LAPACKE_malloc( sizeof(float) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_sormqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_sormqr\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_sormqr_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function sormqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_sormqr_work( int matrix_layout, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    lapack_int r;\n    lapack_int lda_t, ldc_t;\n    float *a_t = NULL, *c_t = NULL;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_sormqr( &side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work,\n                       &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n        lda_t = MAX(1,r);\n        ldc_t = MAX(1,m);\n        /* Check leading dimension(s) */\n        if( lda < k ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_sormqr_work\", info );\n            return info;\n        }\n        if( ldc < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_sormqr_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_sormqr( &side, &trans, &m, &n, &k, a, &lda_t, tau, c, &ldc_t,\n                           work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,k) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        c_t = (float*)LAPACKE_malloc( sizeof(float) * ldc_t * MAX(1,n) );\n        if( c_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_sge_trans( matrix_layout, r, k, a, lda, a_t, lda_t );\n        LAPACKE_sge_trans( matrix_layout, m, n, c, ldc, c_t, ldc_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_sormqr( &side, &trans, &m, &n, &k, a_t, &lda_t, tau, c_t, &ldc_t,\n                       work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, m, n, c_t, ldc_t, c, ldc );\n        /* Release memory and exit */\n        LAPACKE_free( c_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_sormqr_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( 
\"LAPACKE_sormqr_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_spotrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function spotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_spotrf( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_spotrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_spo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_spotrf_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_spotrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function spotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_spotrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_spotrf( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_spotrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_spo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_spotrf( &uplo, &n, 
a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_spo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_spotrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_spotrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_spotri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function spotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_spotri( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_spotri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_spo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_spotri_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_spotri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function spotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_spotri_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_spotri( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        float* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_spotri_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_spo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_spotri( &uplo, &n, 
a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_spo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_spotri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_spotri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_spotrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function spotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_spotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           float* b, lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_spotrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_spo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_sge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -7;\n    }\n#endif\n    return LAPACKE_spotrs_work( matrix_order, uplo, n, nrhs, a, lda, b, ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_spotrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function spotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_spotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                float* b, lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_spotrs( &uplo, &n, &nrhs, a, &lda, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        float* a_t = NULL;\n        float* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_spotrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_spotrs_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );\n       
 if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (float*)LAPACKE_malloc( sizeof(float) * ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_spo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        LAPACKE_sge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_spotrs( &uplo, &n, &nrhs, a_t, &lda_t, b_t, &ldb_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_spotrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_spotrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgeev.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* w,\n                          lapack_complex_double* vl, lapack_int ldvl,\n                          lapack_complex_double* vr, lapack_int ldvr )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* rwork = NULL;\n    lapack_complex_double* work = NULL;\n    lapack_complex_double work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgeev\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Allocate memory for working array(s) */\n    rwork = (double*)LAPACKE_malloc( sizeof(double) * MAX(1,2*n) );\n    if( rwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Query optimal working array(s) size */\n    info = 
LAPACKE_zgeev_work( matrix_order, jobvl, jobvr, n, a, lda, w, vl,\n                               ldvl, vr, ldvr, &work_query, lwork, rwork );\n    if( info != 0 ) {\n        goto exit_level_1;\n    }\n    lwork = LAPACK_Z2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_double*)\n        LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_zgeev_work( matrix_order, jobvl, jobvr, n, a, lda, w, vl,\n                               ldvl, vr, ldvr, work, lwork, rwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_1:\n    LAPACKE_free( rwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgeev\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgeev_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgeev\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* w,\n                               lapack_complex_double* vl, lapack_int ldvl,\n                               lapack_complex_double* vr, lapack_int ldvr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgeev( &jobvl, &jobvr, &n, a, &lda, w, vl, &ldvl, vr, &ldvr,\n                      work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldvl_t = MAX(1,n);\n        lapack_int ldvr_t = MAX(1,n);\n        lapack_complex_double* a_t = NULL;\n        lapack_complex_double* vl_t = NULL;\n        lapack_complex_double* vr_t = NULL;\n        /* Check 
leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_zgeev_work\", info );\n            return info;\n        }\n        if( ldvl < n ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_zgeev_work\", info );\n            return info;\n        }\n        if( ldvr < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_zgeev_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_zgeev( &jobvl, &jobvr, &n, a, &lda_t, w, vl, &ldvl_t, vr,\n                          &ldvr_t, work, &lwork, rwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            vl_t = (lapack_complex_double*)\n                LAPACKE_malloc( sizeof(lapack_complex_double) *\n                                ldvl_t * MAX(1,n) );\n            if( vl_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            vr_t = (lapack_complex_double*)\n                LAPACKE_malloc( sizeof(lapack_complex_double) *\n                                ldvr_t * MAX(1,n) );\n            if( vr_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgeev( &jobvl, &jobvr, &n, a_t, 
&lda_t, w, vl_t, &ldvl_t, vr_t,\n                      &ldvr_t, work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, vl_t, ldvl_t, vl, ldvl );\n        }\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, vr_t, ldvr_t, vr, ldvr );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvr, 'v' ) ) {\n            LAPACKE_free( vr_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobvl, 'v' ) ) {\n            LAPACKE_free( vl_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgeev_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgeev_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgeqrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgeqrf( int matrix_layout, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_complex_double* work = NULL;\n    lapack_complex_double work_query;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgeqrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_layout, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_zgeqrf_work( matrix_layout, m, n, a, lda, tau, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = LAPACK_Z2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_double*)\n        LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );\n    if( work == NULL ) {\n        info = 
LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_zgeqrf_work( matrix_layout, m, n, a, lda, tau, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgeqrf\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgeqrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgeqrf\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgeqrf_work( int matrix_layout, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgeqrf( &m, &n, a, &lda, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        lapack_complex_double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_zgeqrf_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_zgeqrf( &m, &n, a, &lda_t, tau, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_layout, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgeqrf( &m, &n, a_t, &lda_t, tau, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgeqrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgeqrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgesdd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double* s, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* vt,\n                           lapack_int ldvt )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    /* Additional scalars declarations for work arrays */\n    size_t lrwork;\n    lapack_int* iwork = NULL;\n    double* rwork = NULL;\n    lapack_complex_double* work = NULL;\n    lapack_complex_double work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgesdd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -5;\n    }\n#endif\n    /* Additional scalars initializations for work arrays */\n    if( LAPACKE_lsame( jobz, 'n' ) ) {\n        lrwork = MAX(1,5*MIN(m,n));\n    } else {\n        lrwork = 
(size_t)5*MAX(1,MIN(m,n))*MAX(1,MIN(m,n))+7*MIN(m,n);\n    }\n    /* Allocate memory for working array(s) */\n    iwork = (lapack_int*)\n        LAPACKE_malloc( sizeof(lapack_int) * MAX(1,8*MIN(m,n)) );\n    if( iwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    rwork = (double*)LAPACKE_malloc( sizeof(double) * lrwork );\n    if( rwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Query optimal working array(s) size */\n    info = LAPACKE_zgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                                ldvt, &work_query, lwork, rwork, iwork );\n    if( info != 0 ) {\n        goto exit_level_2;\n    }\n    lwork = LAPACK_Z2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_double*)\n        LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_2;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_zgesdd_work( matrix_order, jobz, m, n, a, lda, s, u, ldu, vt,\n                                ldvt, work, lwork, rwork, iwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_2:\n    LAPACKE_free( rwork );\nexit_level_1:\n    LAPACKE_free( iwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgesdd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgesdd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgesdd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, double* s,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* vt, lapack_int ldvt,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgesdd( &jobz, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work,\n                       &lwork, rwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             LAPACKE_lsame( jobz, 's' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? 
m : 1;\n        lapack_int ncols_u = ( LAPACKE_lsame( jobz, 'a' ) ||\n                             ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? m :\n                             ( LAPACKE_lsame( jobz, 's' ) ? MIN(m,n) : 1);\n        lapack_int nrows_vt = ( LAPACKE_lsame( jobz, 'a' ) ||\n                              ( LAPACKE_lsame( jobz, 'o' ) && m<n) ) ? n :\n                              ( LAPACKE_lsame( jobz, 's' ) ? MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        lapack_complex_double* a_t = NULL;\n        lapack_complex_double* u_t = NULL;\n        lapack_complex_double* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_zgesdd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_zgesdd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_zgesdd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_zgesdd( &jobz, &m, &n, a, &lda_t, s, u, &ldu_t, vt, &ldvt_t,\n                           work, &lwork, rwork, iwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            u_t = (lapack_complex_double*)\n                LAPACKE_malloc( sizeof(lapack_complex_double) *\n                                ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            vt_t = (lapack_complex_double*)\n                LAPACKE_malloc( sizeof(lapack_complex_double) *\n                                ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgesdd( &jobz, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, rwork, iwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_zge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( 
LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_zge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n                               ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m>=n) ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobz, 'a' ) || LAPACKE_lsame( jobz, 's' ) ||\n            ( LAPACKE_lsame( jobz, 'o' ) && (m<n) ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgesdd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgesdd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgesvd.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double* s, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* vt,\n                           lapack_int ldvt, double* superb )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    double* rwork = NULL;\n    lapack_complex_double* work = NULL;\n    lapack_complex_double work_query;\n    lapack_int i;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgesvd\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -6;\n    }\n#endif\n    /* Allocate memory for working array(s) */\n    rwork = (double*)LAPACKE_malloc( sizeof(double) * MAX(1,5*MIN(m,n)) );\n    if( rwork == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Query optimal 
working array(s) size */\n    info = LAPACKE_zgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, &work_query, lwork, rwork );\n    if( info != 0 ) {\n        goto exit_level_1;\n    }\n    lwork = LAPACK_Z2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_double*)\n        LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_1;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_zgesvd_work( matrix_order, jobu, jobvt, m, n, a, lda, s, u,\n                                ldu, vt, ldvt, work, lwork, rwork );\n    /* Backup significant data from working array(s) */\n    for( i=0; i<MIN(m,n)-1; i++ ) {\n        superb[i] = rwork[i];\n    }\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_1:\n    LAPACKE_free( rwork );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgesvd\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgesvd_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgesvd\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double* s, lapack_complex_double* u,\n                                lapack_int ldu, lapack_complex_double* vt,\n                                lapack_int ldvt, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgesvd( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,\n                       work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) ||\n                             LAPACKE_lsame( jobu, 's' ) ) ? m : 1;\n        lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m :\n                             ( LAPACKE_lsame( jobu, 's' ) ? 
MIN(m,n) : 1);\n        lapack_int nrows_vt = LAPACKE_lsame( jobvt, 'a' ) ? n :\n                              ( LAPACKE_lsame( jobvt, 's' ) ? MIN(m,n) : 1);\n        lapack_int lda_t = MAX(1,m);\n        lapack_int ldu_t = MAX(1,nrows_u);\n        lapack_int ldvt_t = MAX(1,nrows_vt);\n        lapack_complex_double* a_t = NULL;\n        lapack_complex_double* u_t = NULL;\n        lapack_complex_double* vt_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -7;\n            LAPACKE_xerbla( \"LAPACKE_zgesvd_work\", info );\n            return info;\n        }\n        if( ldu < ncols_u ) {\n            info = -10;\n            LAPACKE_xerbla( \"LAPACKE_zgesvd_work\", info );\n            return info;\n        }\n        if( ldvt < n ) {\n            info = -12;\n            LAPACKE_xerbla( \"LAPACKE_zgesvd_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_zgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt,\n                           &ldvt_t, work, &lwork, rwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            u_t = (lapack_complex_double*)\n                LAPACKE_malloc( sizeof(lapack_complex_double) *\n                                ldu_t * MAX(1,ncols_u) );\n            if( u_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_1;\n            }\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            vt_t = (lapack_complex_double*)\n                LAPACKE_malloc( sizeof(lapack_complex_double) *\n                                ldvt_t * MAX(1,n) );\n            if( vt_t == NULL ) {\n                info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n                goto exit_level_2;\n            }\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgesvd( &jobu, &jobvt, &m, &n, a_t, &lda_t, s, u_t, &ldu_t, vt_t,\n                       &ldvt_t, work, &lwork, rwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_zge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t,\n                               u, ldu );\n        }\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_zge_trans( LAPACK_COL_MAJOR, nrows_vt, n, vt_t, ldvt_t, vt,\n                               
ldvt );\n        }\n        /* Release memory and exit */\n        if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) {\n            LAPACKE_free( vt_t );\n        }\nexit_level_2:\n        if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) {\n            LAPACKE_free( u_t );\n        }\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgesvd_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgesvd_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgetrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_order, m, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_zgetrf_work( matrix_order, m, n, a, lda, ipiv );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgetrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgetrf( &m, &n, a, &lda, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,m);\n        lapack_complex_double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_zgetrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        
LAPACKE_zge_trans( matrix_order, m, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgetrf( &m, &n, a_t, &lda_t, ipiv, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgetrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgetrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgetri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgetri( int matrix_order, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_complex_double* work = NULL;\n    lapack_complex_double work_query;\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgetri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -3;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_zgetri_work( matrix_order, n, a, lda, ipiv, &work_query,\n                                lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = LAPACK_Z2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_double*)\n        LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n 
       goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_zgetri_work( matrix_order, n, a, lda, ipiv, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgetri\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgetri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgetri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgetri_work( int matrix_order, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgetri( &n, a, &lda, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_complex_double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -4;\n            LAPACKE_xerbla( \"LAPACKE_zgetri_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_zgetri( &n, a, &lda_t, ipiv, work, &lwork, &info );\n            return (info < 0) ? 
(info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgetri( &n, a_t, &lda_t, ipiv, work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgetri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgetri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zgetrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zge_nancheck( matrix_order, n, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_zge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -8;\n    }\n#endif\n    return LAPACKE_zgetrs_work( matrix_order, trans, n, nrhs, a, lda, ipiv, b,\n                                ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zgetrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgetrs( &trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        lapack_complex_double* a_t = NULL;\n        lapack_complex_double* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_zgetrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -9;\n            LAPACKE_xerbla( \"LAPACKE_zgetrs_work\", info );\n            return info;\n        }\n       
 /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) *\n                            ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_order, n, n, a, lda, a_t, lda_t );\n        LAPACKE_zge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zgetrs( &trans, &n, &nrhs, a_t, &lda_t, ipiv, b_t, &ldb_t,\n                       &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zgetrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zgetrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrf.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zpotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zpotrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zpotrf\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_zpotrf_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrf_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zpotrf\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zpotrf( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_complex_double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_zpotrf_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        
/* Call LAPACK function and adjust info */\n        LAPACK_zpotrf( &uplo, &n, a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zpo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zpotrf_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zpotrf_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zpotri.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zpotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zpotri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zpotri\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -4;\n    }\n#endif\n    return LAPACKE_zpotri_work( matrix_order, uplo, n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zpotri_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zpotri\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zpotri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zpotri( &uplo, &n, a, &lda, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_complex_double* a_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -5;\n            LAPACKE_xerbla( \"LAPACKE_zpotri_work\", info );\n            return info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        
/* Call LAPACK function and adjust info */\n        LAPACK_zpotri( &uplo, &n, a_t, &lda_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zpo_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );\n        /* Release memory and exit */\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zpotri_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zpotri_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrs.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zpotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb )\n{\n    if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zpotrs\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    if( LAPACKE_zpo_nancheck( matrix_order, uplo, n, a, lda ) ) {\n        return -5;\n    }\n    if( LAPACKE_zge_nancheck( matrix_order, n, nrhs, b, ldb ) ) {\n        return -7;\n    }\n#endif\n    return LAPACKE_zpotrs_work( matrix_order, uplo, n, nrhs, a, lda, b, ldb );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrs_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2011, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zpotrs\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb )\n{\n    lapack_int info = 0;\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zpotrs( &uplo, &n, &nrhs, a, &lda, b, &ldb, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_order == LAPACK_ROW_MAJOR ) {\n        lapack_int lda_t = MAX(1,n);\n        lapack_int ldb_t = MAX(1,n);\n        lapack_complex_double* a_t = NULL;\n        lapack_complex_double* b_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < n ) {\n            info = -6;\n            LAPACKE_xerbla( \"LAPACKE_zpotrs_work\", info );\n            return info;\n        }\n        if( ldb < nrhs ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_zpotrs_work\", info );\n            return info;\n        }\n        /* Allocate memory for 
temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        b_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) *\n                            ldb_t * MAX(1,nrhs) );\n        if( b_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zpo_trans( matrix_order, uplo, n, a, lda, a_t, lda_t );\n        LAPACKE_zge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zpotrs( &uplo, &n, &nrhs, a_t, &lda_t, b_t, &ldb_t, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );\n        /* Release memory and exit */\n        LAPACKE_free( b_t );\nexit_level_1:\n        LAPACKE_free( a_t );\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zpotrs_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zpotrs_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zunmqr.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native high-level C interface to LAPACK function zunmqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zunmqr( int matrix_layout, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc )\n{\n    lapack_int info = 0;\n    lapack_int lwork = -1;\n    lapack_complex_double* work = NULL;\n    lapack_complex_double work_query;\n    lapack_int r;\n    if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {\n        LAPACKE_xerbla( \"LAPACKE_zunmqr\", -1 );\n        return -1;\n    }\n#ifndef LAPACK_DISABLE_NAN_CHECK\n    /* Optionally check input matrices for NaNs */\n    r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n    if( LAPACKE_zge_nancheck( matrix_layout, r, k, a, lda ) ) {\n        return -7;\n    }\n    if( LAPACKE_zge_nancheck( matrix_layout, m, n, c, ldc ) ) {\n        return -10;\n    }\n    if( LAPACKE_z_nancheck( k, tau, 1 ) ) {\n        return -9;\n    }\n#endif\n    /* Query optimal working array(s) size */\n    info = LAPACKE_zunmqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, &work_query, lwork );\n    if( info != 0 ) {\n        goto exit_level_0;\n    }\n    lwork = LAPACK_Z2INT( work_query );\n    /* Allocate memory for work arrays */\n    work = (lapack_complex_double*)\n        LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );\n    if( work == NULL ) {\n        info = LAPACK_WORK_MEMORY_ERROR;\n        goto exit_level_0;\n    }\n    /* Call middle-level interface */\n    info = LAPACKE_zunmqr_work( matrix_layout, side, trans, m, n, k, a, lda, tau,\n                                c, ldc, work, lwork );\n    /* Release memory and exit */\n    LAPACKE_free( work );\nexit_level_0:\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        LAPACKE_xerbla( \"LAPACKE_zunmqr\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/src/lapacke_zunmqr_work.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2014, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n*****************************************************************************\n* Contents: Native middle-level C interface to LAPACK function zunmqr\n* Author: Intel Corporation\n* Generated November 2015\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_int LAPACKE_zunmqr_work( int matrix_layout, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork )\n{\n    lapack_int info = 0;\n    if( matrix_layout == LAPACK_COL_MAJOR ) {\n        /* Call LAPACK function and adjust info */\n        LAPACK_zunmqr( &side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work,\n                       &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n    } else if( matrix_layout == LAPACK_ROW_MAJOR ) {\n        lapack_int r = LAPACKE_lsame( side, 'l' ) ? 
m : n;\n        lapack_int lda_t = MAX(1,r);\n        lapack_int ldc_t = MAX(1,m);\n        lapack_complex_double* a_t = NULL;\n        lapack_complex_double* c_t = NULL;\n        /* Check leading dimension(s) */\n        if( lda < k ) {\n            info = -8;\n            LAPACKE_xerbla( \"LAPACKE_zunmqr_work\", info );\n            return info;\n        }\n        if( ldc < n ) {\n            info = -11;\n            LAPACKE_xerbla( \"LAPACKE_zunmqr_work\", info );\n            return info;\n        }\n        /* Query optimal working array(s) size if requested */\n        if( lwork == -1 ) {\n            LAPACK_zunmqr( &side, &trans, &m, &n, &k, a, &lda_t, tau, c, &ldc_t,\n                           work, &lwork, &info );\n            return (info < 0) ? (info - 1) : info;\n        }\n        /* Allocate memory for temporary array(s) */\n        a_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,k) );\n        if( a_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_0;\n        }\n        c_t = (lapack_complex_double*)\n            LAPACKE_malloc( sizeof(lapack_complex_double) * ldc_t * MAX(1,n) );\n        if( c_t == NULL ) {\n            info = LAPACK_TRANSPOSE_MEMORY_ERROR;\n            goto exit_level_1;\n        }\n        /* Transpose input matrices */\n        LAPACKE_zge_trans( matrix_layout, r, k, a, lda, a_t, lda_t );\n        LAPACKE_zge_trans( matrix_layout, m, n, c, ldc, c_t, ldc_t );\n        /* Call LAPACK function and adjust info */\n        LAPACK_zunmqr( &side, &trans, &m, &n, &k, a_t, &lda_t, tau, c_t, &ldc_t,\n                       work, &lwork, &info );\n        if( info < 0 ) {\n            info = info - 1;\n        }\n        /* Transpose output matrices */\n        LAPACKE_zge_trans( LAPACK_COL_MAJOR, m, n, c_t, ldc_t, c, ldc );\n        /* Release memory and exit */\n        LAPACKE_free( c_t );\nexit_level_1:\n        LAPACKE_free( a_t 
);\nexit_level_0:\n        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n            LAPACKE_xerbla( \"LAPACKE_zunmqr_work\", info );\n        }\n    } else {\n        info = -1;\n        LAPACKE_xerbla( \"LAPACKE_zunmqr_work\", info );\n    }\n    return info;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_c_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a vector for NaN entries. */\n\nlapack_logical LAPACKE_c_nancheck( lapack_int n,\n                                    const lapack_complex_float *x,\n                                    lapack_int incx )\n{\n    lapack_int i, inc;\n\n    if( incx == 0 ) return (lapack_logical) LAPACK_CISNAN( x[0] );\n    inc = ( incx > 0 ) ? incx : -incx ;\n\n    for( i = 0; i < n*inc; i+=inc ) {\n        if( LAPACK_CISNAN( x[i] ) )\n            return (lapack_logical) 1;\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_cge_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_cge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        for( j = 0; j < n; j++ ) {\n            for( i = 0; i < MIN( m, lda ); i++ ) {\n                if( LAPACK_CISNAN( a[i+(size_t)j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        for( i = 0; i < m; i++ ) {\n            for( j = 0; j < MIN( n, lda ); j++ ) {\n                if( LAPACK_CISNAN( a[(size_t)i*lda+j] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_cge_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input general matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_cge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const lapack_complex_float* in, lapack_int ldin,\n                        lapack_complex_float* out, lapack_int ldout )\n{\n    lapack_int i, j, x, y;\n\n    if( in == NULL || out == NULL ) return;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        x = n;\n        y = m;\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        x = m;\n        y = n;\n    } else {\n        /* Unknown input layout */\n        return;\n    }\n\n    /* In case of incorrect m, n, ldin or ldout the function does nothing */\n    for( i = 0; i < MIN( y, ldin ); i++ ) {\n        for( j = 0; j < MIN( x, ldout ); j++ ) {\n            out[ (size_t)i*ldout + j ] = in[ (size_t)j*ldin + i ];\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_cpo_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_cpo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda )\n{\n    return LAPACKE_ctr_nancheck( matrix_order, uplo, 'n', n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_cpo_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input symmetric matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_cpo_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout )\n{\n    LAPACKE_ctr_trans( matrix_order, uplo, 'n', n, in, ldin, out, ldout );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_ctr_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_ctr_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_float *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return (lapack_logical) 0;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Since col_major upper and row_major lower are equal,\n     * and col_major 
lower and row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < n; j++ ) {\n            for( i = 0; i < MIN( j+1-st, lda ); i++ ) {\n                if( LAPACK_CISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else {\n        for( j = 0; j < n-st; j++ ) {\n            for( i = j+st; i < MIN( n, lda ); i++ ) {\n                if( LAPACK_CISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_ctr_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input triangular matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_ctr_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const lapack_complex_float *in, lapack_int ldin,\n                        lapack_complex_float *out, lapack_int ldout )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( in == NULL || out == NULL ) return ;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Perform conversion:\n     * Since col_major upper and 
row_major lower are equal,\n     * and col_major lower and row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < MIN( n, ldout ); j++ ) {\n            for( i = 0; i < MIN( j+1-st, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    } else {\n        for( j = 0; j < MIN( n-st, ldout ); j++ ) {\n            for( i = j+st; i < MIN( n, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_d_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a vector for NaN entries. */\n\nlapack_logical LAPACKE_d_nancheck( lapack_int n,\n                                    const double *x,\n                                    lapack_int incx )\n{\n    lapack_int i, inc;\n\n    if( incx == 0 ) return (lapack_logical) LAPACK_DISNAN( x[0] );\n    inc = ( incx > 0 ) ? incx : -incx ;\n\n    for( i = 0; i < n*inc; i+=inc ) {\n        if( LAPACK_DISNAN( x[i] ) )\n            return (lapack_logical) 1;\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_dge_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_dge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        for( j = 0; j < n; j++ ) {\n            for( i = 0; i < MIN( m, lda ); i++ ) {\n                if( LAPACK_DISNAN( a[i+(size_t)j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        for( i = 0; i < m; i++ ) {\n            for( j = 0; j < MIN( n, lda ); j++ ) {\n                if( LAPACK_DISNAN( a[(size_t)i*lda+j] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_dge_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input general matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_dge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const double* in, lapack_int ldin,\n                        double* out, lapack_int ldout )\n{\n    lapack_int i, j, x, y;\n\n    if( in == NULL || out == NULL ) return;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        x = n;\n        y = m;\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        x = m;\n        y = n;\n    } else {\n        /* Unknown input layout */\n        return;\n    }\n\n    /* In case of incorrect m, n, ldin or ldout the function does nothing */\n    for( i = 0; i < MIN( y, ldin ); i++ ) {\n        for( j = 0; j < MIN( x, ldout ); j++ ) {\n            out[ (size_t)i*ldout + j ] = in[ (size_t)j*ldin + i ];\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_dpo_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_dpo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda )\n{\n    return LAPACKE_dtr_nancheck( matrix_order, uplo, 'n', n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_dpo_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input symmetric matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_dpo_trans( int matrix_order, char uplo, lapack_int n,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout )\n{\n    LAPACKE_dtr_trans( matrix_order, uplo, 'n', n, in, ldin, out, ldout );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_dtr_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_dtr_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const double *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return (lapack_logical) 0;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Since col_major upper and row_major lower are equal,\n     * and col_major lower and 
row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < n; j++ ) {\n            for( i = 0; i < MIN( j+1-st, lda ); i++ ) {\n                if( LAPACK_DISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else {\n        for( j = 0; j < n-st; j++ ) {\n            for( i = j+st; i < MIN( n, lda ); i++ ) {\n                if( LAPACK_DISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_dtr_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input triangular matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_dtr_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const double *in, lapack_int ldin,\n                        double *out, lapack_int ldout )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( in == NULL || out == NULL ) return ;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Perform conversion:\n     * Since col_major upper and row_major lower are equal,\n     
* and col_major lower and row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < MIN( n, ldout ); j++ ) {\n            for( i = 0; i < MIN( j+1-st, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    } else {\n        for( j = 0; j < MIN( n-st, ldout ); j++ ) {\n            for( i = j+st; i < MIN( n, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_lsame.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK lsame\n* Author: Intel Corporation\n* Created in January, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\nlapack_logical LAPACKE_lsame( char ca,  char cb )\n{\n    return (lapack_logical) LAPACK_lsame( &ca, &cb, 1, 1 );\n}\n\n\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_s_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a vector for NaN entries. */\n\nlapack_logical LAPACKE_s_nancheck( lapack_int n,\n                                    const float *x,\n                                    lapack_int incx )\n{\n    lapack_int i, inc;\n\n    if( incx == 0 ) return (lapack_logical) LAPACK_SISNAN( x[0] );\n    inc = ( incx > 0 ) ? incx : -incx ;\n\n    for( i = 0; i < n*inc; i+=inc ) {\n        if( LAPACK_SISNAN( x[i] ) )\n            return (lapack_logical) 1;\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_sge_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_sge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const float *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        for( j = 0; j < n; j++ ) {\n            for( i = 0; i < MIN( m, lda ); i++ ) {\n                if( LAPACK_SISNAN( a[i+(size_t)j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        for( i = 0; i < m; i++ ) {\n            for( j = 0; j < MIN( n, lda ); j++ ) {\n                if( LAPACK_SISNAN( a[(size_t)i*lda+j] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_sge_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input general matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_sge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const float* in, lapack_int ldin,\n                        float* out, lapack_int ldout )\n{\n    lapack_int i, j, x, y;\n\n    if( in == NULL || out == NULL ) return;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        x = n;\n        y = m;\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        x = m;\n        y = n;\n    } else {\n        /* Unknown input layout */\n        return;\n    }\n\n    /* In case of incorrect m, n, ldin or ldout the function does nothing */\n    for( i = 0; i < MIN( y, ldin ); i++ ) {\n        for( j = 0; j < MIN( x, ldout ); j++ ) {\n            out[ (size_t)i*ldout + j ] = in[ (size_t)j*ldin + i ];\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_spo_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_spo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const float *a,\n                                      lapack_int lda )\n{\n    return LAPACKE_str_nancheck( matrix_order, uplo, 'n', n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_spo_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input symmetric matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_spo_trans( int matrix_order, char uplo, lapack_int n,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout )\n{\n    LAPACKE_str_trans( matrix_order, uplo, 'n', n, in, ldin, out, ldout );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_str_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_str_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const float *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return (lapack_logical) 0;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Since col_major upper and row_major lower are equal,\n     * and col_major lower and row_major 
upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < n; j++ ) {\n            for( i = 0; i < MIN( j+1-st, lda ); i++ ) {\n                if( LAPACK_SISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else {\n        for( j = 0; j < n-st; j++ ) {\n            for( i = j+st; i < MIN( n, lda ); i++ ) {\n                if( LAPACK_SISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_str_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input triangular matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_str_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const float *in, lapack_int ldin,\n                        float *out, lapack_int ldout )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( in == NULL || out == NULL ) return ;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Perform conversion:\n     * Since col_major upper and row_major lower are equal,\n     * 
and col_major lower and row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < MIN( n, ldout ); j++ ) {\n            for( i = 0; i < MIN( j+1-st, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    } else {\n        for( j = 0; j < MIN( n-st, ldout ); j++ ) {\n            for( i = j+st; i < MIN( n, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_xerbla.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK lsame\n* Author: Intel Corporation\n* Created in January, 2010\n*****************************************************************************/\n\n#include <stdio.h>\n#include \"lapacke_utils.h\"\n\nvoid LAPACKE_xerbla( const char *name, lapack_int info )\n{\n    if( info == LAPACK_WORK_MEMORY_ERROR ) {\n        printf( \"Not enough memory to allocate work array in %s\\n\", name );\n    } else if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {\n        printf( \"Not enough memory to transpose matrix in %s\\n\", name );\n    } else if( info < 0 ) {\n        printf( \"Wrong parameter %d in %s\\n\", -(int) info, name );\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_z_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a vector for NaN entries. */\n\nlapack_logical LAPACKE_z_nancheck( lapack_int n,\n                                    const lapack_complex_double *x,\n                                    lapack_int incx )\n{\n    lapack_int i, inc;\n\n    if( incx == 0 ) return (lapack_logical) LAPACK_ZISNAN( x[0] );\n    inc = ( incx > 0 ) ? incx : -incx ;\n\n    for( i = 0; i < n*inc; i+=inc ) {\n        if( LAPACK_ZISNAN( x[i] ) )\n            return (lapack_logical) 1;\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_zge_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_zge_nancheck( int matrix_order, lapack_int m,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        for( j = 0; j < n; j++ ) {\n            for( i = 0; i < MIN( m, lda ); i++ ) {\n                if( LAPACK_ZISNAN( a[i+(size_t)j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        for( i = 0; i < m; i++ ) {\n            for( j = 0; j < MIN( n, lda ); j++ ) {\n                if( LAPACK_ZISNAN( a[(size_t)i*lda+j] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_zge_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input general matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_zge_trans( int matrix_order, lapack_int m, lapack_int n,\n                        const lapack_complex_double* in, lapack_int ldin,\n                        lapack_complex_double* out, lapack_int ldout )\n{\n    lapack_int i, j, x, y;\n\n    if( in == NULL || out == NULL ) return;\n\n    if( matrix_order == LAPACK_COL_MAJOR ) {\n        x = n;\n        y = m;\n    } else if ( matrix_order == LAPACK_ROW_MAJOR ) {\n        x = m;\n        y = n;\n    } else {\n        /* Unknown input layout */\n        return;\n    }\n\n    /* In case of incorrect m, n, ldin or ldout the function does nothing */\n    for( i = 0; i < MIN( y, ldin ); i++ ) {\n        for( j = 0; j < MIN( x, ldout ); j++ ) {\n            out[ (size_t)i*ldout + j ] = in[ (size_t)j*ldin + i ];\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_zpo_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_zpo_nancheck( int matrix_order, char uplo,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda )\n{\n    return LAPACKE_ztr_nancheck( matrix_order, uplo, 'n', n, a, lda );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_zpo_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input symmetric matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_zpo_trans( int matrix_order, char uplo, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout )\n{\n    LAPACKE_ztr_trans( matrix_order, uplo, 'n', n, in, ldin, out, ldout );\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_ztr_nancheck.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n#include \"lapacke_utils.h\"\n\n/* Check a matrix for NaN entries. */\n\nlapack_logical LAPACKE_ztr_nancheck( int matrix_order, char uplo, char diag,\n                                      lapack_int n,\n                                      const lapack_complex_double *a,\n                                      lapack_int lda )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( a == NULL ) return (lapack_logical) 0;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return (lapack_logical) 0;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Since col_major upper and row_major lower are equal,\n     * and col_major 
lower and row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < n; j++ ) {\n            for( i = 0; i < MIN( j+1-st, lda ); i++ ) {\n                if( LAPACK_ZISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    } else {\n        for( j = 0; j < n-st; j++ ) {\n            for( i = j+st; i < MIN( n, lda ); i++ ) {\n                if( LAPACK_ZISNAN( a[i+j*lda] ) )\n                    return (lapack_logical) 1;\n            }\n        }\n    }\n    return (lapack_logical) 0;\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke/utils/lapacke_ztr_trans.c",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK utility function\n* Author: Intel Corporation\n* Created in February, 2010\n*****************************************************************************/\n\n#include \"lapacke_utils.h\"\n\n/* Converts input triangular matrix from row-major(C) to column-major(Fortran)\n * layout or vice versa.\n */\n\nvoid LAPACKE_ztr_trans( int matrix_order, char uplo, char diag, lapack_int n,\n                        const lapack_complex_double *in, lapack_int ldin,\n                        lapack_complex_double *out, lapack_int ldout )\n{\n    lapack_int i, j, st;\n    lapack_logical colmaj, lower, unit;\n\n    if( in == NULL || out == NULL ) return ;\n\n    colmaj = ( matrix_order == LAPACK_COL_MAJOR );\n    lower  = LAPACKE_lsame( uplo, 'l' );\n    unit   = LAPACKE_lsame( diag, 'u' );\n\n    if( ( !colmaj && ( matrix_order != LAPACK_ROW_MAJOR ) ) ||\n        ( !lower  && !LAPACKE_lsame( uplo, 'u' ) ) ||\n        ( !unit   && !LAPACKE_lsame( diag, 'n' ) ) ) {\n        /* Just exit if any of input parameters are wrong */\n        return;\n    }\n    if( unit ) {\n        /* If unit, then don't touch diagonal, start from 1st column or row */\n        st = 1;\n    } else  {\n        /* If non-unit, then check diagonal also, starting from [0,0] */\n        st = 0;\n    }\n\n    /* Perform conversion:\n     * Since col_major upper and 
row_major lower are equal,\n     * and col_major lower and row_major upper are equals too -\n     * using one code for equal cases. XOR( colmaj, upper )\n     */\n    if( ( colmaj || lower ) && !( colmaj && lower ) ) {\n        for( j = st; j < MIN( n, ldout ); j++ ) {\n            for( i = 0; i < MIN( j+1-st, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    } else {\n        for( j = 0; j < MIN( n-st, ldout ); j++ ) {\n            for( i = j+st; i < MIN( n, ldin ); i++ ) {\n                out[ j+i*ldout ] = in[ i+j*ldin ];\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke.cpp",
    "content": "//This file is auto-generated by make_lapacke_cpp.rb\n//It includes all source files in the lapacke/ subdirectory\n#include \"lapacke/src/lapacke_dgeev_work.c\"\n#include \"lapacke/src/lapacke_zgeqrf_work.c\"\n#include \"lapacke/src/lapacke_cgesdd.c\"\n#include \"lapacke/src/lapacke_cunmqr.c\"\n#include \"lapacke/src/lapacke_zgesdd_work.c\"\n#include \"lapacke/src/lapacke_dgeqrf.c\"\n#include \"lapacke/src/lapacke_dgesdd_work.c\"\n#include \"lapacke/src/lapacke_zgeev_work.c\"\n#include \"lapacke/src/lapacke_cpotri_work.c\"\n#include \"lapacke/src/lapacke_dormqr_work.c\"\n#include \"lapacke/src/lapacke_cpotri.c\"\n#include \"lapacke/src/lapacke_cgesvd.c\"\n#include \"lapacke/src/lapacke_cgetrf.c\"\n#include \"lapacke/src/lapacke_dgetrf.c\"\n#include \"lapacke/src/lapacke_sgetri_work.c\"\n#include \"lapacke/src/lapacke_zgetrs.c\"\n#include \"lapacke/src/lapacke_cgetrf_work.c\"\n#include \"lapacke/src/lapacke_sgesvd.c\"\n#include \"lapacke/src/lapacke_cpotrf.c\"\n#include \"lapacke/src/lapacke_dgetrf_work.c\"\n#include \"lapacke/src/lapacke_dgeev.c\"\n#include \"lapacke/src/lapacke_zgeev.c\"\n#include \"lapacke/src/lapacke_cgetri.c\"\n#include \"lapacke/src/lapacke_zgetri_work.c\"\n#include \"lapacke/src/lapacke_zpotrs_work.c\"\n#include \"lapacke/src/lapacke_sormqr.c\"\n#include \"lapacke/src/lapacke_sgeev_work.c\"\n#include \"lapacke/src/lapacke_spotrf_work.c\"\n#include \"lapacke/src/lapacke_zgetri.c\"\n#include \"lapacke/src/lapacke_cgeqrf.c\"\n#include \"lapacke/src/lapacke_zunmqr.c\"\n#include \"lapacke/src/lapacke_zgesvd.c\"\n#include \"lapacke/src/lapacke_dgetri_work.c\"\n#include \"lapacke/src/lapacke_dgeqrf_work.c\"\n#include \"lapacke/src/lapacke_dpotri.c\"\n#include \"lapacke/src/lapacke_dpotri_work.c\"\n#include \"lapacke/src/lapacke_spotrs.c\"\n#include \"lapacke/src/lapacke_cgesvd_work.c\"\n#include \"lapacke/src/lapacke_cpotrs.c\"\n#include \"lapacke/src/lapacke_sormqr_work.c\"\n#include \"lapacke/src/lapacke_zunmqr_work.c\"\n#include 
\"lapacke/src/lapacke_sgetrs_work.c\"\n#include \"lapacke/src/lapacke_cgeev_work.c\"\n#include \"lapacke/src/lapacke_zpotrf_work.c\"\n#include \"lapacke/src/lapacke_zgeqrf.c\"\n#include \"lapacke/src/lapacke_sgesvd_work.c\"\n#include \"lapacke/src/lapacke_spotrf.c\"\n#include \"lapacke/src/lapacke_cunmqr_work.c\"\n#include \"lapacke/src/lapacke_cpotrf_work.c\"\n#include \"lapacke/src/lapacke_dgetri.c\"\n#include \"lapacke/src/lapacke_cgeqrf_work.c\"\n#include \"lapacke/src/lapacke_sgeqrf_work.c\"\n#include \"lapacke/src/lapacke_zpotri.c\"\n#include \"lapacke/src/lapacke_dgetrs.c\"\n#include \"lapacke/src/lapacke_zgesdd.c\"\n#include \"lapacke/src/lapacke_zpotri_work.c\"\n#include \"lapacke/src/lapacke_sgeev.c\"\n#include \"lapacke/src/lapacke_dgesvd.c\"\n#include \"lapacke/src/lapacke_zpotrf.c\"\n#include \"lapacke/src/lapacke_cgeev.c\"\n#include \"lapacke/src/lapacke_spotri_work.c\"\n#include \"lapacke/src/lapacke_zgetrf.c\"\n#include \"lapacke/src/lapacke_dgetrs_work.c\"\n#include \"lapacke/src/lapacke_cgesdd_work.c\"\n#include \"lapacke/src/lapacke_spotrs_work.c\"\n#include \"lapacke/src/lapacke_cpotrs_work.c\"\n#include \"lapacke/src/lapacke_cgetrs.c\"\n#include \"lapacke/src/lapacke_sgeqrf.c\"\n#include \"lapacke/src/lapacke_sgesdd.c\"\n#include \"lapacke/src/lapacke_sgesdd_work.c\"\n#include \"lapacke/src/lapacke_zgetrs_work.c\"\n#include \"lapacke/src/lapacke_sgetri.c\"\n#include \"lapacke/src/lapacke_spotri.c\"\n#include \"lapacke/src/lapacke_dpotrf.c\"\n#include \"lapacke/src/lapacke_cgetrs_work.c\"\n#include \"lapacke/src/lapacke_zgetrf_work.c\"\n#include \"lapacke/src/lapacke_sgetrf_work.c\"\n#include \"lapacke/src/lapacke_dgesvd_work.c\"\n#include \"lapacke/src/lapacke_dgesdd.c\"\n#include \"lapacke/src/lapacke_cgetri_work.c\"\n#include \"lapacke/src/lapacke_zpotrs.c\"\n#include \"lapacke/src/lapacke_zgesvd_work.c\"\n#include \"lapacke/src/lapacke_dpotrs_work.c\"\n#include \"lapacke/src/lapacke_dormqr.c\"\n#include 
\"lapacke/src/lapacke_dpotrs.c\"\n#include \"lapacke/src/lapacke_sgetrf.c\"\n#include \"lapacke/src/lapacke_dpotrf_work.c\"\n#include \"lapacke/src/lapacke_sgetrs.c\"\n#include \"lapacke/utils/lapacke_sge_nancheck.c\"\n#include \"lapacke/utils/lapacke_zge_trans.c\"\n#include \"lapacke/utils/lapacke_dpo_trans.c\"\n#include \"lapacke/utils/lapacke_cpo_trans.c\"\n#include \"lapacke/utils/lapacke_cge_trans.c\"\n#include \"lapacke/utils/lapacke_dge_nancheck.c\"\n#include \"lapacke/utils/lapacke_cpo_nancheck.c\"\n#include \"lapacke/utils/lapacke_c_nancheck.c\"\n#include \"lapacke/utils/lapacke_lsame.c\"\n#include \"lapacke/utils/lapacke_str_nancheck.c\"\n#include \"lapacke/utils/lapacke_zpo_trans.c\"\n#include \"lapacke/utils/lapacke_str_trans.c\"\n#include \"lapacke/utils/lapacke_ztr_nancheck.c\"\n#include \"lapacke/utils/lapacke_cge_nancheck.c\"\n#include \"lapacke/utils/lapacke_d_nancheck.c\"\n#include \"lapacke/utils/lapacke_ctr_trans.c\"\n#include \"lapacke/utils/lapacke_dge_trans.c\"\n#include \"lapacke/utils/lapacke_sge_trans.c\"\n#include \"lapacke/utils/lapacke_zge_nancheck.c\"\n#include \"lapacke/utils/lapacke_dtr_nancheck.c\"\n#include \"lapacke/utils/lapacke_s_nancheck.c\"\n#include \"lapacke/utils/lapacke_spo_trans.c\"\n#include \"lapacke/utils/lapacke_dtr_trans.c\"\n#include \"lapacke/utils/lapacke_xerbla.c\"\n#include \"lapacke/utils/lapacke_ctr_nancheck.c\"\n#include \"lapacke/utils/lapacke_ztr_trans.c\"\n#include \"lapacke/utils/lapacke_z_nancheck.c\"\n#include \"lapacke/utils/lapacke_dpo_nancheck.c\"\n#include \"lapacke/utils/lapacke_zpo_nancheck.c\"\n#include \"lapacke/utils/lapacke_spo_nancheck.c\"\n"
  },
  {
    "path": "ext/nmatrix_lapacke/lapacke_nmatrix.h",
    "content": "//need to define a few things before including the real lapacke.h\n\n#include \"data/data.h\" //needed because this is where our complex types are defined\n\n//tell LAPACKE to use our complex types\n#define LAPACK_COMPLEX_CUSTOM\n#define lapack_complex_float nm::Complex64\n#define lapack_complex_double nm::Complex128\n\n//define name-mangling scheme for FORTRAN functions\n//ADD_ means that the symbol dgemm_ is associated with the fortran\n//function DGEMM\n#define ADD_\n\n//now we can include the real lapacke.h\n#include \"lapacke.h\"\n"
  },
  {
    "path": "ext/nmatrix_lapacke/make_lapacke_cpp.rb",
    "content": "#We want this to be a C++ file since our complex types require C++.\n\nFile.open(\"lapacke.cpp\",\"w\") do |file|\n  file.puts \"//This file is auto-generated by make_lapacke_cpp.rb\"\n  file.puts \"//It includes all source files in the lapacke/ subdirectory\"\n  Dir[\"lapacke/**/*.c\"].each do |file2|\n    file.puts \"#include \\\"#{file2}\\\"\"\n  end\nend\n"
  },
  {
    "path": "ext/nmatrix_lapacke/math_lapacke/cblas_local.h",
    "content": "//This is copied from CBLAS reference implementation.\n#ifndef CBLAS_H\n#define CBLAS_H\n#include <stddef.h>\n\n/* Allow the use in C++ code.  */\n#ifdef __cplusplus\nextern \"C\" \n{\n#endif\n\n/*\n * Enumerated and derived types\n */\n#define CBLAS_INDEX size_t  /* this may vary between platforms */\n\n//Remove enums from this file so we can use them in code that doesn't rely on CBLAS\n#include \"math/cblas_enums.h\"\n\n/*\n * ===========================================================================\n * Prototypes for level 1 BLAS functions (complex are recast as routines)\n * ===========================================================================\n */\nfloat  cblas_sdsdot(const int N, const float alpha, const float *X,\n                    const int incX, const float *Y, const int incY);\ndouble cblas_dsdot(const int N, const float *X, const int incX, const float *Y,\n                   const int incY);\nfloat  cblas_sdot(const int N, const float  *X, const int incX,\n                  const float  *Y, const int incY);\ndouble cblas_ddot(const int N, const double *X, const int incX,\n                  const double *Y, const int incY);\n\n/*\n * Functions having prefixes Z and C only\n */\nvoid   cblas_cdotu_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotu);\nvoid   cblas_cdotc_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotc);\n\nvoid   cblas_zdotu_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotu);\nvoid   cblas_zdotc_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotc);\n\n\n/*\n * Functions having prefixes S D SC DZ\n */\nfloat  cblas_snrm2(const int N, const float *X, const int incX);\nfloat  cblas_sasum(const int N, const float *X, const int incX);\n\ndouble cblas_dnrm2(const int N, 
const double *X, const int incX);\ndouble cblas_dasum(const int N, const double *X, const int incX);\n\nfloat  cblas_scnrm2(const int N, const void *X, const int incX);\nfloat  cblas_scasum(const int N, const void *X, const int incX);\n\ndouble cblas_dznrm2(const int N, const void *X, const int incX);\ndouble cblas_dzasum(const int N, const void *X, const int incX);\n\n\n/*\n * Functions having standard 4 prefixes (S D C Z)\n */\nCBLAS_INDEX cblas_isamax(const int N, const float  *X, const int incX);\nCBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);\nCBLAS_INDEX cblas_icamax(const int N, const void   *X, const int incX);\nCBLAS_INDEX cblas_izamax(const int N, const void   *X, const int incX);\n\n/*\n * ===========================================================================\n * Prototypes for level 1 BLAS routines\n * ===========================================================================\n */\n\n/* \n * Routines with standard 4 prefixes (s, d, c, z)\n */\nvoid cblas_sswap(const int N, float *X, const int incX, \n                 float *Y, const int incY);\nvoid cblas_scopy(const int N, const float *X, const int incX, \n                 float *Y, const int incY);\nvoid cblas_saxpy(const int N, const float alpha, const float *X,\n                 const int incX, float *Y, const int incY);\n\nvoid cblas_dswap(const int N, double *X, const int incX, \n                 double *Y, const int incY);\nvoid cblas_dcopy(const int N, const double *X, const int incX, \n                 double *Y, const int incY);\nvoid cblas_daxpy(const int N, const double alpha, const double *X,\n                 const int incX, double *Y, const int incY);\n\nvoid cblas_cswap(const int N, void *X, const int incX, \n                 void *Y, const int incY);\nvoid cblas_ccopy(const int N, const void *X, const int incX, \n                 void *Y, const int incY);\nvoid cblas_caxpy(const int N, const void *alpha, const void *X,\n                 const int incX, 
void *Y, const int incY);\n\nvoid cblas_zswap(const int N, void *X, const int incX, \n                 void *Y, const int incY);\nvoid cblas_zcopy(const int N, const void *X, const int incX, \n                 void *Y, const int incY);\nvoid cblas_zaxpy(const int N, const void *alpha, const void *X,\n                 const int incX, void *Y, const int incY);\n\n\n/* \n * Routines with S and D prefix only\n */\nvoid cblas_srotg(float *a, float *b, float *c, float *s);\nvoid cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);\nvoid cblas_srot(const int N, float *X, const int incX,\n                float *Y, const int incY, const float c, const float s);\nvoid cblas_srotm(const int N, float *X, const int incX,\n                float *Y, const int incY, const float *P);\n\nvoid cblas_drotg(double *a, double *b, double *c, double *s);\nvoid cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);\nvoid cblas_drot(const int N, double *X, const int incX,\n                double *Y, const int incY, const double c, const double  s);\nvoid cblas_drotm(const int N, double *X, const int incX,\n                double *Y, const int incY, const double *P);\n\n\n/* \n * Routines with S D C Z CS and ZD prefixes\n */\nvoid cblas_sscal(const int N, const float alpha, float *X, const int incX);\nvoid cblas_dscal(const int N, const double alpha, double *X, const int incX);\nvoid cblas_cscal(const int N, const void *alpha, void *X, const int incX);\nvoid cblas_zscal(const int N, const void *alpha, void *X, const int incX);\nvoid cblas_csscal(const int N, const float alpha, void *X, const int incX);\nvoid cblas_zdscal(const int N, const double alpha, void *X, const int incX);\n\n/*\n * ===========================================================================\n * Prototypes for level 2 BLAS\n * ===========================================================================\n */\n\n/* \n * Routines with standard 4 prefixes (S, D, C, Z)\n */\nvoid 
cblas_sgemv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 const float *X, const int incX, const float beta,\n                 float *Y, const int incY);\nvoid cblas_sgbmv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const float alpha,\n                 const float *A, const int lda, const float *X,\n                 const int incX, const float beta, float *Y, const int incY);\nvoid cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *A, const int lda, \n                 float *X, const int incX);\nvoid cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const float *A, const int lda, \n                 float *X, const int incX);\nvoid cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *Ap, float *X, const int incX);\nvoid cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *A, const int lda, float *X,\n                 const int incX);\nvoid cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const float *A, const int lda,\n                 float *X, const int incX);\nvoid cblas_stpsv(const enum CBLAS_ORDER 
order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *Ap, float *X, const int incX);\n\nvoid cblas_dgemv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 const double *X, const int incX, const double beta,\n                 double *Y, const int incY);\nvoid cblas_dgbmv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const double alpha,\n                 const double *A, const int lda, const double *X,\n                 const int incX, const double beta, double *Y, const int incY);\nvoid cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *A, const int lda, \n                 double *X, const int incX);\nvoid cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const double *A, const int lda, \n                 double *X, const int incX);\nvoid cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *Ap, double *X, const int incX);\nvoid cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *A, const int lda, double *X,\n                 const int incX);\nvoid cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum 
CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const double *A, const int lda,\n                 double *X, const int incX);\nvoid cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *Ap, double *X, const int incX);\n\nvoid cblas_cgemv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *X, const int incX, const void *beta,\n                 void *Y, const int incY);\nvoid cblas_cgbmv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const void *alpha,\n                 const void *A, const int lda, const void *X,\n                 const int incX, const void *beta, void *Y, const int incY);\nvoid cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const int lda, \n                 void *X, const int incX);\nvoid cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda, \n                 void *X, const int incX);\nvoid cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\nvoid cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const 
int N, const void *A, const int lda, void *X,\n                 const int incX);\nvoid cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\n\nvoid cblas_zgemv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *X, const int incX, const void *beta,\n                 void *Y, const int incY);\nvoid cblas_zgbmv(const enum CBLAS_ORDER order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const void *alpha,\n                 const void *A, const int lda, const void *X,\n                 const int incX, const void *beta, void *Y, const int incY);\nvoid cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const int lda, \n                 void *X, const int incX);\nvoid cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda, \n                 void *X, const int incX);\nvoid cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int 
incX);\nvoid cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const int lda, void *X,\n                 const int incX);\nvoid cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\n\n\n/* \n * Routines with S and D prefixes only\n */\nvoid cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const float alpha, const float *A,\n                 const int lda, const float *X, const int incX,\n                 const float beta, float *Y, const int incY);\nvoid cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const float alpha, const float *A,\n                 const int lda, const float *X, const int incX,\n                 const float beta, float *Y, const int incY);\nvoid cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const float alpha, const float *Ap,\n                 const float *X, const int incX,\n                 const float beta, float *Y, const int incY);\nvoid cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,\n                const float alpha, const float *X, const int incX,\n                const float *Y, const int incY, float *A, const int lda);\nvoid cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n      
          const int incX, float *A, const int lda);\nvoid cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const int incX, float *Ap);\nvoid cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const int incX, const float *Y, const int incY, float *A,\n                const int lda);\nvoid cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const int incX, const float *Y, const int incY, float *A);\n\nvoid cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const double alpha, const double *A,\n                 const int lda, const double *X, const int incX,\n                 const double beta, double *Y, const int incY);\nvoid cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const double alpha, const double *A,\n                 const int lda, const double *X, const int incX,\n                 const double beta, double *Y, const int incY);\nvoid cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const double alpha, const double *Ap,\n                 const double *X, const int incX,\n                 const double beta, double *Y, const int incY);\nvoid cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,\n                const double alpha, const double *X, const int incX,\n                const double *Y, const int incY, double *A, const int lda);\nvoid cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, double *A, const int lda);\nvoid cblas_dspr(const enum CBLAS_ORDER order, 
const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, double *Ap);\nvoid cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, const double *Y, const int incY, double *A,\n                const int lda);\nvoid cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, const double *Y, const int incY, double *A);\n\n\n/* \n * Routines with C and Z prefixes only\n */\nvoid cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *Ap,\n                 const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const 
int N, const float alpha, const void *X, const int incX,\n                void *A, const int lda);\nvoid cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const void *X,\n                const int incX, void *A);\nvoid cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *A, const int lda);\nvoid cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *Ap);\n\nvoid cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *Ap,\n                 const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_zher(const enum CBLAS_ORDER 
order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const void *X, const int incX,\n                void *A, const int lda);\nvoid cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const void *X,\n                const int incX, void *A);\nvoid cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *A, const int lda);\nvoid cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *Ap);\n\n/*\n * ===========================================================================\n * Prototypes for level 3 BLAS\n * ===========================================================================\n */\n\n/* \n * Routines with standard 4 prefixes (S, D, C, Z)\n */\nvoid cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const float alpha, const float *A,\n                 const int lda, const float *B, const int ldb,\n                 const float beta, float *C, const int ldc);\nvoid cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 const float *B, const int ldb, const float beta,\n                 float *C, const int ldc);\nvoid cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const float alpha, const float *A, const int lda,\n                 const float beta, float *C, 
const int ldc);\nvoid cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const float alpha, const float *A, const int lda,\n                  const float *B, const int ldb, const float beta,\n                  float *C, const int ldc);\nvoid cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 float *B, const int ldb);\nvoid cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 float *B, const int ldb);\n\nvoid cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const double alpha, const double *A,\n                 const int lda, const double *B, const int ldb,\n                 const double beta, double *C, const int ldc);\nvoid cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 const double *B, const int ldb, const double beta,\n                 double *C, const int ldc);\nvoid cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const double alpha, const double *A, const int lda,\n                 const double beta, double 
*C, const int ldc);\nvoid cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const double alpha, const double *A, const int lda,\n                  const double *B, const int ldb, const double beta,\n                  double *C, const int ldc);\nvoid cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 double *B, const int ldb);\nvoid cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 double *B, const int ldb);\n\nvoid cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const void *alpha, const void *A,\n                 const int lda, const void *B, const int ldb,\n                 const void *beta, void *C, const int ldc);\nvoid cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const void *alpha, const void *A, const int lda,\n                 const void *beta, void *C, const 
int ldc);\nvoid cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int lda,\n                  const void *B, const int ldb, const void *beta,\n                  void *C, const int ldc);\nvoid cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\nvoid cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\n\nvoid cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const void *alpha, const void *A,\n                 const int lda, const void *B, const int ldb,\n                 const void *beta, void *C, const int ldc);\nvoid cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const void *alpha, const void *A, const int lda,\n                 const void *beta, void *C, const int ldc);\nvoid 
cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int lda,\n                  const void *B, const int ldb, const void *beta,\n                  void *C, const int ldc);\nvoid cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\nvoid cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\n\n\n/* \n * Routines with prefixes C and Z only\n */\nvoid cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const float alpha, const void *A, const int lda,\n                 const float beta, void *C, const int ldc);\nvoid cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int lda,\n                  const void *B, const int ldb, const float beta,\n                  void *C, const int 
ldc);\n\nvoid cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const double alpha, const void *A, const int lda,\n                 const double beta, void *C, const int ldc);\nvoid cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int lda,\n                  const void *B, const int ldb, const double beta,\n                  void *C, const int ldc);\n\nvoid cblas_xerbla(int p, const char *rout, const char *form, ...);\n\n#ifdef __cplusplus\n}\n#endif \n\n#endif\n"
  },
  {
    "path": "ext/nmatrix_lapacke/math_lapacke/cblas_templates_lapacke.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == cblas_templates_lapacke.h\n//\n// Define template functions for calling CBLAS functions in the\n// nm::math::lapacke namespace.\n//\n\n#ifndef CBLAS_TEMPLATES_LAPACK_H\n#define CBLAS_TEMPLATES_LAPACK_H\n\n//includes so we have access to internal implementations\n#include \"math/rotg.h\"\n#include \"math/rot.h\"\n#include \"math/asum.h\"\n#include \"math/nrm2.h\"\n#include \"math/imax.h\"\n#include \"math/scal.h\"\n#include \"math/gemv.h\"\n#include \"math/gemm.h\"\n#include \"math/trsm.h\"\n\nnamespace nm { namespace math { namespace lapacke {\n \n//Add cblas templates in the correct namespace\n#include \"math/cblas_templates_core.h\"\n\n}}}\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix_lapacke/math_lapacke/lapacke_templates.h",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == lapacke_templates.h\n//\n// Templated functions for calling LAPACKE functions directly.\n//\n\n#ifndef LAPACKE_TEMPLATES_H\n#define LAPACKE_TEMPLATES_H\n\nnamespace nm { namespace math { namespace lapacke {\n\n//getrf\ntemplate <typename DType>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, DType* a, const int lda, int* ipiv) {\n  //We don't want to call the internal implementation since the CLAPACK interface is slightly different than the LAPACKE.\n  rb_raise(rb_eNotImpError, \"lapacke_getrf not implemented for non_BLAS dtypes. 
Try clapack_getrf instead.\");\n  return 0;\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, float* a, const int lda, int* ipiv) {\n  return LAPACKE_sgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, double* a, const int lda, int* ipiv) {\n  return LAPACKE_dgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex64* a, const int lda, int* ipiv) {\n  return LAPACKE_cgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex128* a, const int lda, int* ipiv) {\n  return LAPACKE_zgetrf(order, m, n, a, lda, ipiv);\n}\n\ntemplate <typename DType>\ninline int lapacke_getrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, int* ipiv) {\n  return getrf<DType>(order, m, n, static_cast<DType*>(a), lda, ipiv);\n}\n\n//geqrf\ntemplate <typename DType>\ninline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, DType* a, const int lda, DType* tau) {\n  rb_raise(rb_eNotImpError, \"lapacke_geqrf not implemented for non_BLAS dtypes.\");\n  return 0;\n}\n\ntemplate <>\ninline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, float* a, const int lda, float* tau) {\n  return LAPACKE_sgeqrf(order, m, n, a, lda, tau);\n}\n\ntemplate < > \ninline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, double* a, const int lda, double* tau) {\n  return LAPACKE_dgeqrf(order, m, n, a, lda, tau);\n}\n\ntemplate <>\ninline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, Complex64* a, const int lda, Complex64* tau) {\n  return LAPACKE_cgeqrf(order, m, n, a, lda, tau);\n}\n\ntemplate <>\ninline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, Complex128* a, const int lda, Complex128* tau) {\n  return LAPACKE_zgeqrf(order, m, 
n, a, lda, tau);\n}\n\ntemplate <typename DType>\ninline int lapacke_geqrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, void* tau) {\n  return geqrf<DType>(order, m, n, static_cast<DType*>(a), lda, static_cast<DType*>(tau));\n}\n\n//ormqr\ntemplate <typename DType>\ninline int ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, DType* a, const int lda, DType* tau, DType* c, const int ldc) {\n  rb_raise(rb_eNotImpError, \"lapacke_ormqr not implemented for non_BLAS dtypes.\");\n  return 0;\n}\n\ntemplate <>\ninline int ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, float* a, const int lda, float* tau, float* c, const int ldc) {\n  return LAPACKE_sormqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);\n}\n\ntemplate <> \ninline int ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, double* a, const int lda, double* tau, double* c, const int ldc) {\n  return LAPACKE_dormqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);\n}\n\ntemplate <typename DType>\ninline int lapacke_ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, void* a, const int lda, void* tau, void* c, const int ldc) {\n  return ormqr<DType>(order, side, trans, m, n, k, static_cast<DType*>(a), lda, static_cast<DType*>(tau), static_cast<DType*>(c), ldc);\n}\n\n//unmqr\ntemplate <typename DType>\ninline int unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, DType* a, const int lda, DType* tau, DType* c, const int ldc) {\n  rb_raise(rb_eNotImpError, \"lapacke_unmqr not implemented for non complex dtypes.\");\n  return 0;\n}\n\ntemplate <>\ninline int unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, Complex64* a, const int lda, Complex64* tau, Complex64* c, const int ldc) {\n  return 
LAPACKE_cunmqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);\n}\n\ntemplate <> \ninline int unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, Complex128* a, const int lda, Complex128* tau, Complex128* c, const int ldc) {\n  return LAPACKE_zunmqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);\n}\n\ntemplate <typename DType>\ninline int lapacke_unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, void* a, const int lda, void* tau, void* c, const int ldc) {\n  return unmqr<DType>(order, side, trans, m, n, k, static_cast<DType*>(a), lda, static_cast<DType*>(tau), static_cast<DType*>(c), ldc);\n}\n\n//getri\ntemplate <typename DType>\ninline int getri(const enum CBLAS_ORDER order, const int n, DType* a, const int lda, const int* ipiv) {\n  rb_raise(rb_eNotImpError, \"getri not yet implemented for non-BLAS dtypes\");\n  return 0;\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, float* a, const int lda, const int* ipiv) {\n  return LAPACKE_sgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, double* a, const int lda, const int* ipiv) {\n  return LAPACKE_dgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, Complex64* a, const int lda, const int* ipiv) {\n  return LAPACKE_cgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <>\ninline int getri(const enum CBLAS_ORDER order, const int n, Complex128* a, const int lda, const int* ipiv) {\n  return LAPACKE_zgetri(order, n, a, lda, ipiv);\n}\n\ntemplate <typename DType>\ninline int lapacke_getri(const enum CBLAS_ORDER order, const int n, void* a, const int lda, const int* ipiv) {\n  return getri<DType>(order, n, static_cast<DType*>(a), lda, ipiv);\n}\n\n//getrs\ntemplate <typename DType>\ninline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const DType* 
A,\n           const int lda, const int* ipiv, DType* B, const int ldb)\n{\n  rb_raise(rb_eNotImpError, \"lapacke_getrs not implemented for non_BLAS dtypes. Try clapack_getrs instead.\");\n  return 0;\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const float* A,\n           const int lda, const int* ipiv, float* B, const int ldb)\n{\n  return LAPACKE_sgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const double* A,\n           const int lda, const int* ipiv, double* B, const int ldb)\n{\n  return LAPACKE_dgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const Complex64* A,\n           const int lda, const int* ipiv, Complex64* B, const int ldb)\n{\n  return LAPACKE_cgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\ntemplate <>\ninline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const Complex128* A,\n           const int lda, const int* ipiv, Complex128* B, const int ldb)\n{\n  return LAPACKE_zgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);\n}\n\ntemplate <typename DType>\ninline int lapacke_getrs(const enum CBLAS_ORDER order, char trans, const int n, const int nrhs,\n                         const void* a, const int lda, const int* ipiv, void* b, const int ldb) {\n  return getrs<DType>(order, trans, n, nrhs, static_cast<const DType*>(a), lda, ipiv, static_cast<DType*>(b), ldb);\n}\n\n//potrf\ntemplate <typename DType>\ninline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, DType* A, const int lda) {\n  rb_raise(rb_eNotImpError, \"not implemented for non-BLAS dtypes\");\n  return 0;\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, float* A, const int lda) {\n  return 
LAPACKE_spotrf(order, uplo, N, A, lda);\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, double* A, const int lda) {\n  return LAPACKE_dpotrf(order, uplo, N, A, lda);\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, Complex64* A, const int lda) {\n  return LAPACKE_cpotrf(order, uplo, N, A, lda);\n}\n\ntemplate <>\ninline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, Complex128* A, const int lda) {\n  return LAPACKE_zpotrf(order, uplo, N, A, lda);\n}\n\ntemplate <typename DType>\ninline int lapacke_potrf(const enum CBLAS_ORDER order, char uplo, const int n, void* a, const int lda) {\n  return potrf<DType>(order, uplo, n, static_cast<DType*>(a), lda);\n}\n\n//potrs\ntemplate <typename DType>\ninline int potrs(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const DType* A,\n           const int lda, DType* B, const int ldb)\n{\n  rb_raise(rb_eNotImpError, \"not implemented for non-BLAS dtypes\");\n  return 0;\n}\n\ntemplate <>\ninline int potrs<float> (const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const float* A,\n           const int lda, float* B, const int ldb)\n{\n  return LAPACKE_spotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);\n}\n\ntemplate <>\ninline int potrs<double>(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const double* A,\n           const int lda, double* B, const int ldb)\n{\n  return LAPACKE_dpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);\n}\n\ntemplate <>\ninline int potrs<Complex64>(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const Complex64* A,\n           const int lda, Complex64* B, const int ldb)\n{\n  return LAPACKE_cpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);\n}\n\ntemplate <>\ninline int potrs<Complex128>(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const Complex128* A,\n           const int lda, Complex128* B, const int 
ldb)\n{\n  return LAPACKE_zpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);\n}\n\ntemplate <typename DType>\ninline int lapacke_potrs(const enum CBLAS_ORDER order, char uplo, const int n, const int nrhs,\n                         const void* a, const int lda, void* b, const int ldb) {\n  return potrs<DType>(order, uplo, n, nrhs, static_cast<const DType*>(a), lda, static_cast<DType*>(b), ldb);\n}\n\n//potri\ntemplate <typename DType>\ninline int potri(const enum CBLAS_ORDER order, char uplo, const int n, DType* a, const int lda) {\n  rb_raise(rb_eNotImpError, \"potri not yet implemented for non-BLAS dtypes\");\n  return 0;\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, char uplo, const int n, float* a, const int lda) {\n  return LAPACKE_spotri(order, uplo, n, a, lda);\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, char uplo, const int n, double* a, const int lda) {\n  return LAPACKE_dpotri(order, uplo, n, a, lda);\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, char uplo, const int n, Complex64* a, const int lda) {\n  return LAPACKE_cpotri(order, uplo, n, a, lda);\n}\n\ntemplate <>\ninline int potri(const enum CBLAS_ORDER order, char uplo, const int n, Complex128* a, const int lda) {\n  return LAPACKE_zpotri(order, uplo, n, a, lda);\n}\n\ntemplate <typename DType>\ninline int lapacke_potri(const enum CBLAS_ORDER order, char uplo, const int n, void* a, const int lda) {\n  return potri<DType>(order, uplo, n, static_cast<DType*>(a), lda);\n}\n\n//gesvd\ntemplate <typename DType, typename CType>\ninline int gesvd(int matrix_layout, char jobu, char jobvt, int m, int n, DType* a, int lda, CType* s, DType* u, int ldu, DType* vt, int ldvt, CType* superb) {\n  rb_raise(rb_eNotImpError, \"gesvd not yet implemented for non-BLAS dtypes\");\n  return 0;\n}\n\ntemplate <>\ninline int gesvd<float, float>(int matrix_layout, char jobu, char jobvt, int m, int n, float* a, int lda, float* s, float* u, int ldu, float* vt, int ldvt, 
float* superb) {\n  return LAPACKE_sgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);\n}\n\ntemplate <>\ninline int gesvd<double, double>(int matrix_layout, char jobu, char jobvt, int m, int n, double* a, int lda, double* s, double* u, int ldu, double* vt, int ldvt, double* superb) {\n  return LAPACKE_dgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);\n}\n\ntemplate <>\ninline int gesvd<nm::Complex64, float>(int matrix_layout, char jobu, char jobvt, int m, int n, nm::Complex64* a, int lda, float* s, nm::Complex64* u, int ldu, nm::Complex64* vt, int ldvt, float* superb) {\n  return LAPACKE_cgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);\n}\n\ntemplate <>\ninline int gesvd<nm::Complex128, double>(int matrix_layout, char jobu, char jobvt, int m, int n, nm::Complex128* a, int lda, double* s, nm::Complex128* u, int ldu, nm::Complex128* vt, int ldvt, double* superb) {\n  return LAPACKE_zgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);\n}\n\ntemplate <typename DType, typename CType>\ninline int lapacke_gesvd(int matrix_layout, char jobu, char jobvt, int m, int n, void* a, int lda, void* s, void* u, int ldu, void* vt, int ldvt, void* superb) {\n  return gesvd<DType,CType>(matrix_layout, jobu, jobvt, m, n, static_cast<DType*>(a), lda, static_cast<CType*>(s), static_cast<DType*>(u), ldu, static_cast<DType*>(vt), ldvt, static_cast<CType*>(superb));\n}\n\n//gesdd\ntemplate <typename DType, typename CType>\ninline int gesdd(int matrix_layout, char jobz, int m, int n, DType* a, int lda, CType* s, DType* u, int ldu, DType* vt, int ldvt) {\n  rb_raise(rb_eNotImpError, \"gesdd not yet implemented for non-BLAS dtypes\");\n  return 0;\n}\n\ntemplate <>\ninline int gesdd<float, float>(int matrix_layout, char jobz, int m, int n, float* a, int lda, float* s, float* u, int ldu, float* vt, int ldvt) {\n  return LAPACKE_sgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, 
ldvt);\n}\n\ntemplate <>\ninline int gesdd<double, double>(int matrix_layout, char jobz, int m, int n, double* a, int lda, double* s, double* u, int ldu, double* vt, int ldvt) {\n  return LAPACKE_dgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);\n}\n\ntemplate <>\ninline int gesdd<nm::Complex64, float>(int matrix_layout, char jobz, int m, int n, nm::Complex64* a, int lda, float* s, nm::Complex64* u, int ldu, nm::Complex64* vt, int ldvt) {\n  return LAPACKE_cgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);\n}\n\ntemplate <>\ninline int gesdd<nm::Complex128, double>(int matrix_layout, char jobz, int m, int n, nm::Complex128* a, int lda, double* s, nm::Complex128* u, int ldu, nm::Complex128* vt, int ldvt) {\n  return LAPACKE_zgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);\n}\n\ntemplate <typename DType, typename CType>\ninline int lapacke_gesdd(int matrix_layout, char jobz, int m, int n, void* a, int lda, void* s, void* u, int ldu, void* vt, int ldvt) {\n  return gesdd<DType,CType>(matrix_layout, jobz, m, n, static_cast<DType*>(a), lda, static_cast<CType*>(s), static_cast<DType*>(u), ldu, static_cast<DType*>(vt), ldvt);\n}\n\n//geev\n//This one is a little tricky. The signature is different for the complex\n//versions than for the real ones. This is because real matrices can have\n//complex eigenvalues. 
For the complex types, the eigenvalues are just\n//returned in an argument that's a complex array, but for real types the real\n//parts of the eigenvalues are returned\n//in one (array) argument, and the complex parts in a separate argument.\n//The solution is that the template takes a wi argument, but it is just\n//ignored in the specializations for complex types.\n\ntemplate <typename DType>\ninline int geev(int matrix_layout, char jobvl, char jobvr, int n, DType* a, int lda, DType* w, DType* wi, DType* vl, int ldvl, DType* vr, int ldvr) {\n  rb_raise(rb_eNotImpError, \"not yet implemented for non-BLAS dtypes\");\n  return -1;\n}\n\ntemplate <>\ninline int geev(int matrix_layout, char jobvl, char jobvr, int n, float* a, int lda, float* w, float* wi, float* vl, int ldvl, float* vr, int ldvr) {\n  return LAPACKE_sgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, wi, vl, ldvl, vr, ldvr);\n}\n\ntemplate <>\ninline int geev(int matrix_layout, char jobvl, char jobvr, int n, double* a, int lda, double* w, double* wi, double* vl, int ldvl, double* vr, int ldvr) {\n  return LAPACKE_dgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, wi, vl, ldvl, vr, ldvr);\n}\n\ntemplate <>\ninline int geev(int matrix_layout, char jobvl, char jobvr, int n, Complex64* a, int lda, Complex64* w, Complex64* wi, Complex64* vl, int ldvl, Complex64* vr, int ldvr) {\n  return LAPACKE_cgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, vl, ldvl, vr, ldvr);\n}\n\ntemplate <>\ninline int geev(int matrix_layout, char jobvl, char jobvr, int n, Complex128* a, int lda, Complex128* w, Complex128* wi, Complex128* vl, int ldvl, Complex128* vr, int ldvr) {\n  return LAPACKE_zgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, vl, ldvl, vr, ldvr);\n}\n\ntemplate <typename DType>\ninline int lapacke_geev(int matrix_layout, char jobvl, char jobvr, int n, void* a, int lda, void* w, void* wi, void* vl, int ldvl, void* vr, int ldvr) {\n  return geev<DType>(matrix_layout, jobvl, jobvr, n, static_cast<DType*>(a), lda, 
static_cast<DType*>(w), static_cast<DType*>(wi), static_cast<DType*>(vl), ldvl, static_cast<DType*>(vr), ldvr);\n}\n\n}}}\n\n#endif\n"
  },
  {
    "path": "ext/nmatrix_lapacke/math_lapacke.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == math_lapacke.cpp\n//\n// Ruby-exposed CBLAS and LAPACK functions that call BLAS\n// and LAPACKE functions.\n//\n\n#include \"data/data.h\"\n\n#include \"lapacke_nmatrix.h\"\n\n#include \"math_lapacke/cblas_local.h\"\n\n#include \"math/util.h\"\n\n#include \"math_lapacke/cblas_templates_lapacke.h\"\n\n#include \"math_lapacke/lapacke_templates.h\"\n\n\n/*\n * Forward Declarations\n */\n\nextern \"C\" {\n  /* BLAS Level 1. */\n  static VALUE nm_lapacke_cblas_scal(VALUE self, VALUE n, VALUE scale, VALUE vector, VALUE incx);\n  static VALUE nm_lapacke_cblas_nrm2(VALUE self, VALUE n, VALUE x, VALUE incx);\n  static VALUE nm_lapacke_cblas_asum(VALUE self, VALUE n, VALUE x, VALUE incx);\n  static VALUE nm_lapacke_cblas_rot(VALUE self, VALUE n, VALUE x, VALUE incx, VALUE y, VALUE incy, VALUE c, VALUE s);\n  static VALUE nm_lapacke_cblas_rotg(VALUE self, VALUE ab);\n  static VALUE nm_lapacke_cblas_imax(VALUE self, VALUE n, VALUE x, VALUE incx);\n\n  /* BLAS Level 2. */\n  static VALUE nm_lapacke_cblas_gemv(VALUE self, VALUE trans_a, VALUE m, VALUE n, VALUE vAlpha, VALUE a, VALUE lda,\n                             VALUE x, VALUE incx, VALUE vBeta, VALUE y, VALUE incy);\n\n  /* BLAS Level 3. 
*/\n  static VALUE nm_lapacke_cblas_gemm(VALUE self, VALUE order, VALUE trans_a, VALUE trans_b, VALUE m, VALUE n, VALUE k, VALUE vAlpha,\n                             VALUE a, VALUE lda, VALUE b, VALUE ldb, VALUE vBeta, VALUE c, VALUE ldc);\n  static VALUE nm_lapacke_cblas_trsm(VALUE self, VALUE order, VALUE side, VALUE uplo, VALUE trans_a, VALUE diag, VALUE m, VALUE n,\n                             VALUE vAlpha, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n  static VALUE nm_lapacke_cblas_trmm(VALUE self, VALUE order, VALUE side, VALUE uplo, VALUE trans_a, VALUE diag, VALUE m, VALUE n,\n                             VALUE alpha, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n  static VALUE nm_lapacke_cblas_herk(VALUE self, VALUE order, VALUE uplo, VALUE trans, VALUE n, VALUE k, VALUE alpha, VALUE a,\n                             VALUE lda, VALUE beta, VALUE c, VALUE ldc);\n  static VALUE nm_lapacke_cblas_syrk(VALUE self, VALUE order, VALUE uplo, VALUE trans, VALUE n, VALUE k, VALUE alpha, VALUE a,\n                             VALUE lda, VALUE beta, VALUE c, VALUE ldc);\n\n  /* LAPACK. 
*/\n  static VALUE nm_lapacke_lapacke_getrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda);\n  static VALUE nm_lapacke_lapacke_getrs(VALUE self, VALUE order, VALUE trans, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE ipiv, VALUE b, VALUE ldb);\n  static VALUE nm_lapacke_lapacke_getri(VALUE self, VALUE order, VALUE n, VALUE a, VALUE lda, VALUE ipiv);\n  static VALUE nm_lapacke_lapacke_potrf(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda);\n  static VALUE nm_lapacke_lapacke_potrs(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE b, VALUE ldb);\n  static VALUE nm_lapacke_lapacke_potri(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda);\n\n  static VALUE nm_lapacke_lapacke_geqrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE tau);\n  static VALUE nm_lapacke_lapacke_ormqr(VALUE self, VALUE order, VALUE side, VALUE trans, VALUE m, VALUE n, VALUE k, VALUE a, VALUE lda, VALUE tau, VALUE c, VALUE ldc);\n  static VALUE nm_lapacke_lapacke_unmqr(VALUE self, VALUE order, VALUE side, VALUE trans, VALUE m, VALUE n, VALUE k, VALUE a, VALUE lda, VALUE tau, VALUE c, VALUE ldc);\n\n\n  static VALUE nm_lapacke_lapacke_gesvd(VALUE self, VALUE order, VALUE jobu, VALUE jobvt, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt, VALUE superb);\n  static VALUE nm_lapacke_lapacke_gesdd(VALUE self, VALUE order, VALUE jobz, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt);\n  static VALUE nm_lapacke_lapacke_geev(VALUE self, VALUE order, VALUE jobvl, VALUE jobvr, VALUE n, VALUE a, VALUE lda, VALUE w, VALUE wi, VALUE vl, VALUE ldvl, VALUE vr, VALUE ldvr);\n}\n\nextern \"C\" {\n\n///////////////////\n// Ruby Bindings //\n///////////////////\n\nvoid nm_math_init_lapack() {\n\n  VALUE cNMatrix_LAPACKE = rb_define_module_under(cNMatrix, \"LAPACKE\");\n\n  VALUE cNMatrix_LAPACKE_LAPACK = 
rb_define_module_under(cNMatrix_LAPACKE, \"LAPACK\");\n  VALUE cNMatrix_LAPACKE_BLAS = rb_define_module_under(cNMatrix_LAPACKE, \"BLAS\");\n\n  //BLAS Level 1\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_scal\", (METHOD)nm_lapacke_cblas_scal, 4);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_nrm2\", (METHOD)nm_lapacke_cblas_nrm2, 3);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_asum\", (METHOD)nm_lapacke_cblas_asum, 3);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_rot\",  (METHOD)nm_lapacke_cblas_rot,  7);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_rotg\", (METHOD)nm_lapacke_cblas_rotg, 1);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_imax\", (METHOD)nm_lapacke_cblas_imax, 3);\n\n  //BLAS Level 2\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_gemv\", (METHOD)nm_lapacke_cblas_gemv, 11);\n\n  //BLAS Level 3\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_gemm\", (METHOD)nm_lapacke_cblas_gemm, 14);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_trsm\", (METHOD)nm_lapacke_cblas_trsm, 12);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_trmm\", (METHOD)nm_lapacke_cblas_trmm, 12);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_syrk\", (METHOD)nm_lapacke_cblas_syrk, 11);\n  rb_define_singleton_method(cNMatrix_LAPACKE_BLAS, \"cblas_herk\", (METHOD)nm_lapacke_cblas_herk, 11);\n\n  /* LAPACK Functions */\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_getrf\", (METHOD)nm_lapacke_lapacke_getrf, 5);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_getrs\", (METHOD)nm_lapacke_lapacke_getrs, 9);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_getri\", (METHOD)nm_lapacke_lapacke_getri, 5);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_potrf\", (METHOD)nm_lapacke_lapacke_potrf, 5);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, 
\"lapacke_potrs\", (METHOD)nm_lapacke_lapacke_potrs, 8);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_potri\", (METHOD)nm_lapacke_lapacke_potri, 5);\n\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_geqrf\", (METHOD)nm_lapacke_lapacke_geqrf, 6);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_ormqr\", (METHOD)nm_lapacke_lapacke_ormqr, 11);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_unmqr\", (METHOD)nm_lapacke_lapacke_unmqr, 11);\n\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_gesvd\", (METHOD)nm_lapacke_lapacke_gesvd, 13);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_gesdd\", (METHOD)nm_lapacke_lapacke_gesdd, 11);\n  rb_define_singleton_method(cNMatrix_LAPACKE_LAPACK, \"lapacke_geev\", (METHOD)nm_lapacke_lapacke_geev, 12);\n}\n\n/*\n * call-seq:\n *     NMatrix::BLAS.cblas_scal(n, alpha, vector, inc) -> NMatrix\n *\n * BLAS level 1 function +scal+. Works with all dtypes.\n *\n * Scale +vector+ in-place by +alpha+ and also return it. The operation is as\n * follows:\n *  x <- alpha * x\n *\n * - +n+ -> Number of elements of +vector+.\n * - +alpha+ -> Scalar value used in the operation.\n * - +vector+ -> NMatrix of shape [n,1] or [1,n]. Modified in-place.\n * - +inc+ -> Increment used in the scaling function. 
Should generally be 1.\n */\nstatic VALUE nm_lapacke_cblas_scal(VALUE self, VALUE n, VALUE alpha, VALUE vector, VALUE incx) {\n  nm::dtype_t dtype = NM_DTYPE(vector);\n\n  void* scalar = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, scalar);\n\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::lapacke::cblas_scal, void, const int n,\n      const void* scalar, void* x, const int incx);\n\n  ttable[dtype](FIX2INT(n), scalar, NM_STORAGE_DENSE(vector)->elements,\n      FIX2INT(incx));\n\n  return vector;\n}\n\n/*\n * Call any of the cblas_xrotg functions as directly as possible.\n *\n * xROTG computes the elements of a Givens plane rotation matrix such that:\n *\n *  |  c s |   | a |   | r |\n *  | -s c | * | b | = | 0 |\n *\n * where r = +- sqrt( a**2 + b**2 ) and c**2 + s**2 = 1.\n *\n * The Givens plane rotation can be used to introduce zero elements into a matrix selectively.\n *\n * This function differs from most of the other raw BLAS accessors. Instead of\n * providing a, b, c, s as arguments, you should only provide a and b (the\n * inputs), and you should provide them as the first two elements of any dense\n * NMatrix type.\n *\n * The outputs [c,s] will be returned in a Ruby Array at the end; the input\n * NMatrix will also be modified in-place.\n *\n * This function, like the other cblas_ functions, does minimal type-checking.\n */\nstatic VALUE nm_lapacke_cblas_rotg(VALUE self, VALUE ab) {\n  static void (*ttable[nm::NUM_DTYPES])(void* a, void* b, void* c, void* s) = {\n      NULL, NULL, NULL, NULL, NULL, // can't represent c and s as integers, so no point in having integer operations.\n      nm::math::lapacke::cblas_rotg<float>,\n      nm::math::lapacke::cblas_rotg<double>,\n      nm::math::lapacke::cblas_rotg<nm::Complex64>,\n      nm::math::lapacke::cblas_rotg<nm::Complex128>,\n      NULL //nm::math::lapacke::cblas_rotg<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(ab);\n\n  if (!ttable[dtype]) {\n    
rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qnil;\n\n  } else {\n    NM_CONSERVATIVE(nm_register_value(&self));\n    NM_CONSERVATIVE(nm_register_value(&ab));\n    void *pC = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n         *pS = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n\n    // extract A and B from the NVector (first two elements)\n    void* pA = NM_STORAGE_DENSE(ab)->elements;\n    void* pB = (char*)(NM_STORAGE_DENSE(ab)->elements) + DTYPE_SIZES[dtype];\n    // c and s are output\n\n    ttable[dtype](pA, pB, pC, pS);\n\n    VALUE result = rb_ary_new2(2);\n\n    if (dtype == nm::RUBYOBJ) {\n      rb_ary_store(result, 0, *reinterpret_cast<VALUE*>(pC));\n      rb_ary_store(result, 1, *reinterpret_cast<VALUE*>(pS));\n    } else {\n      rb_ary_store(result, 0, nm::rubyobj_from_cval(pC, dtype).rval);\n      rb_ary_store(result, 1, nm::rubyobj_from_cval(pS, dtype).rval);\n    }\n    NM_CONSERVATIVE(nm_unregister_value(&ab));\n    NM_CONSERVATIVE(nm_unregister_value(&self));\n    return result;\n  }\n}\n\n\n/*\n * Call any of the cblas_xrot functions as directly as possible.\n *\n * xROT is a BLAS level 1 routine (taking two vectors) which applies a plane rotation.\n *\n * It's tough to find documentation on xROT. Here are what we think the arguments are for:\n *  * n     :: number of elements to consider in x and y\n *  * x     :: a vector (expects an NVector)\n *  * incx  :: stride of x\n *  * y     :: a vector (expects an NVector)\n *  * incy  :: stride of y\n *  * c     :: cosine of the angle of rotation\n *  * s     :: sine of the angle of rotation\n *\n * Note that c and s will be the same dtype as x and y, except when x and y are complex. If x and y are complex, c and s\n * will be float for Complex64 or double for Complex128.\n *\n * You probably don't want to call this function. Instead, why don't you try rot, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. 
Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_cblas_rot(VALUE self, VALUE n, VALUE x, VALUE incx, VALUE y, VALUE incy, VALUE c, VALUE s) {\n  static void (*ttable[nm::NUM_DTYPES])(const int N, void*, const int, void*, const int, const void*, const void*) = {\n      NULL, NULL, NULL, NULL, NULL, // can't represent c and s as integers, so no point in having integer operations.\n      nm::math::lapacke::cblas_rot<float,float>,\n      nm::math::lapacke::cblas_rot<double,double>,\n      nm::math::lapacke::cblas_rot<nm::Complex64,float>,\n      nm::math::lapacke::cblas_rot<nm::Complex128,double>,\n      nm::math::lapacke::cblas_rot<nm::RubyObject,nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(x);\n\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qfalse;\n  } else {\n    void *pC, *pS;\n\n    // We need to ensure the cosine and sine arguments are the correct dtype -- which may differ from the actual dtype.\n    if (dtype == nm::COMPLEX64) {\n      pC = NM_ALLOCA_N(float,1);\n      pS = NM_ALLOCA_N(float,1);\n      rubyval_to_cval(c, nm::FLOAT32, pC);\n      rubyval_to_cval(s, nm::FLOAT32, pS);\n    } else if (dtype == nm::COMPLEX128) {\n      pC = NM_ALLOCA_N(double,1);\n      pS = NM_ALLOCA_N(double,1);\n      rubyval_to_cval(c, nm::FLOAT64, pC);\n      rubyval_to_cval(s, nm::FLOAT64, pS);\n    } else {\n      pC = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n      pS = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n      rubyval_to_cval(c, dtype, pC);\n      rubyval_to_cval(s, dtype, pS);\n    }\n\n\n    ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), NM_STORAGE_DENSE(y)->elements, FIX2INT(incy), pC, pS);\n\n    return Qtrue;\n  }\n}\n\n\n/*\n * Call any of the cblas_xnrm2 functions as directly as possible.\n *\n * xNRM2 is a BLAS level 1 routine which calculates the 2-norm of an 
n-vector x.\n *\n * Arguments:\n *  * n     :: length of x, must be at least 0\n *  * x     :: pointer to first entry of input vector\n *  * incx  :: stride of x, must be POSITIVE (ATLAS says non-zero, but 3.8.4 code only allows positive)\n *\n * You probably don't want to call this function. Instead, why don't you try nrm2, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_cblas_nrm2(VALUE self, VALUE n, VALUE x, VALUE incx) {\n\n  static void (*ttable[nm::NUM_DTYPES])(const int N, const void* X, const int incX, void* sum) = {\n      NULL, NULL, NULL, NULL, NULL, // no help for integers\n      nm::math::lapacke::cblas_nrm2<float32_t>,\n      nm::math::lapacke::cblas_nrm2<float64_t>,\n      nm::math::lapacke::cblas_nrm2<nm::Complex64>,\n      nm::math::lapacke::cblas_nrm2<nm::Complex128>,\n      nm::math::lapacke::cblas_nrm2<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype  = NM_DTYPE(x);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this operation undefined for integer vectors\");\n    return Qnil;\n\n  } else {\n    // Determine the return dtype and allocate it\n    nm::dtype_t rdtype = dtype;\n    if      (dtype == nm::COMPLEX64)  rdtype = nm::FLOAT32;\n    else if (dtype == nm::COMPLEX128) rdtype = nm::FLOAT64;\n\n    void *Result = NM_ALLOCA_N(char, DTYPE_SIZES[rdtype]);\n\n    ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), Result);\n\n    return nm::rubyobj_from_cval(Result, rdtype).rval;\n  }\n}\n\n\n\n/*\n * Call any of the cblas_xasum functions as directly as possible.\n *\n * xASUM is a BLAS level 1 routine which calculates the sum of absolute values of the entries\n * of a vector x.\n *\n * Arguments:\n *  * n     :: length of x, must be at least 0\n *  * x     :: pointer to first entry of input vector\n *  * incx  :: stride of x, 
must be POSITIVE (ATLAS says non-zero, but 3.8.4 code only allows positive)\n *\n * You probably don't want to call this function. Instead, why don't you try asum, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_cblas_asum(VALUE self, VALUE n, VALUE x, VALUE incx) {\n\n  static void (*ttable[nm::NUM_DTYPES])(const int N, const void* X, const int incX, void* sum) = {\n      nm::math::lapacke::cblas_asum<uint8_t>,\n      nm::math::lapacke::cblas_asum<int8_t>,\n      nm::math::lapacke::cblas_asum<int16_t>,\n      nm::math::lapacke::cblas_asum<int32_t>,\n      nm::math::lapacke::cblas_asum<int64_t>,\n      nm::math::lapacke::cblas_asum<float32_t>,\n      nm::math::lapacke::cblas_asum<float64_t>,\n      nm::math::lapacke::cblas_asum<nm::Complex64>,\n      nm::math::lapacke::cblas_asum<nm::Complex128>,\n      nm::math::lapacke::cblas_asum<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype  = NM_DTYPE(x);\n\n  // Determine the return dtype and allocate it\n  nm::dtype_t rdtype = dtype;\n  if      (dtype == nm::COMPLEX64)  rdtype = nm::FLOAT32;\n  else if (dtype == nm::COMPLEX128) rdtype = nm::FLOAT64;\n\n  void *Result = NM_ALLOCA_N(char, DTYPE_SIZES[rdtype]);\n\n  ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), Result);\n\n  return nm::rubyobj_from_cval(Result, rdtype).rval;\n}\n\n/*\n * call-seq:\n *    NMatrix::BLAS.cblas_imax(n, vector, inc) -> Fixnum\n *\n * BLAS level 1 routine.\n *\n * Return the index of the largest element of +vector+.\n *\n * - +n+ -> Vector's size. Generally, you can use NMatrix#rows or NMatrix#cols.\n * - +vector+ -> A NMatrix of shape [n,1] or [1,n] with any dtype.\n * - +inc+ -> It's the increment used when searching. 
Use 1 except if you know\n *   what you're doing.\n */\nstatic VALUE nm_lapacke_cblas_imax(VALUE self, VALUE n, VALUE x, VALUE incx) {\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::lapacke::cblas_imax, int, const int n, const void* x, const int incx);\n\n  nm::dtype_t dtype = NM_DTYPE(x);\n\n  int index = ttable[dtype](FIX2INT(n), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx));\n\n  // Convert to Ruby's Int value.\n  return INT2FIX(index);\n}\n\n/* Call any of the cblas_xgemv functions as directly as possible.\n *\n * The cblas_xgemv functions (dgemv, sgemv, cgemv, and zgemv) define the following operation:\n *\n *    y = alpha*op(A)*x + beta*y\n *\n * where op(A) is one of <tt>op(A) = A</tt>, <tt>op(A) = A**T</tt>, or the complex conjugate of A.\n *\n * Note that this will only work for dense matrices that are of types :float32, :float64, :complex64, and :complex128.\n * Other types are not implemented in BLAS, and while they exist in NMatrix, this method is intended only to\n * expose the ultra-optimized ATLAS versions.\n *\n * == Arguments\n * See: http://www.netlib.org/blas/dgemv.f\n *\n * You probably don't want to call this function. Instead, why don't you try cblas_gemv, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_cblas_gemv(VALUE self,\n                           VALUE trans_a,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE x, VALUE incx,\n                           VALUE beta,\n                           VALUE y, VALUE incy)\n{\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::lapacke::cblas_gemv, bool, const enum CBLAS_TRANSPOSE, const int, const int, const void*, const void*, const int, const void*, const int, const void*, void*, const int)\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n       *pBeta  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, pAlpha);\n  rubyval_to_cval(beta, dtype, pBeta);\n\n  return ttable[dtype](blas_transpose_sym(trans_a), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(x)->elements, FIX2INT(incx), pBeta, NM_STORAGE_DENSE(y)->elements, FIX2INT(incy)) ? Qtrue : Qfalse;\n}\n\n/* Call any of the cblas_xgemm functions as directly as possible.\n *\n * The cblas_xgemm functions (dgemm, sgemm, cgemm, and zgemm) define the following operation:\n *\n *    C = alpha*op(A)*op(B) + beta*C\n *\n * where op(X) is one of <tt>op(X) = X</tt>, <tt>op(X) = X**T</tt>, or the complex conjugate of X.\n *\n * Note that this will only work for dense matrices that are of types :float32, :float64, :complex64, and :complex128.\n * Other types are not implemented in BLAS, and while they exist in NMatrix, this method is intended only to\n * expose the ultra-optimized ATLAS versions.\n *\n * == Arguments\n * See: http://www.netlib.org/blas/dgemm.f\n *\n * You probably don't want to call this function. Instead, why don't you try gemm, which is more flexible\n * with its arguments?\n *\n * This function does almost no type checking. 
Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_cblas_gemm(VALUE self,\n                           VALUE order,\n                           VALUE trans_a, VALUE trans_b,\n                           VALUE m, VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::math::lapacke::cblas_gemm, void, const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int m, int n, int k, void* alpha, void* a, int lda, void* b, int ldb, void* beta, void* c, int ldc);\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n       *pBeta  = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n  rubyval_to_cval(alpha, dtype, pAlpha);\n  rubyval_to_cval(beta, dtype, pBeta);\n\n  ttable[dtype](blas_order_sym(order), blas_transpose_sym(trans_a), blas_transpose_sym(trans_b), FIX2INT(m), FIX2INT(n), FIX2INT(k), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb), pBeta, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n\n  return c;\n}\n\n\nstatic VALUE nm_lapacke_cblas_trsm(VALUE self,\n                           VALUE order,\n                           VALUE side, VALUE uplo,\n                           VALUE trans_a, VALUE diag,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_SIDE, const enum CBLAS_UPLO,\n                                        const enum CBLAS_TRANSPOSE, const enum CBLAS_DIAG,\n                                        
const int m, const int n, const void* alpha, const void* a,\n                                        const int lda, void* b, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::lapacke::cblas_trsm<float>,\n      nm::math::lapacke::cblas_trsm<double>,\n      cblas_ctrsm, cblas_ztrsm, // call directly, same function signature!\n      nm::math::lapacke::cblas_trsm<nm::RubyObject>\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n\n    ttable[dtype](blas_order_sym(order), blas_side_sym(side), blas_uplo_sym(uplo), blas_transpose_sym(trans_a), blas_diag_sym(diag), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  return Qtrue;\n}\n\nstatic VALUE nm_lapacke_cblas_trmm(VALUE self,\n                           VALUE order,\n                           VALUE side, VALUE uplo,\n                           VALUE trans_a, VALUE diag,\n                           VALUE m, VALUE n,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE b, VALUE ldb)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER,\n                                        const enum CBLAS_SIDE, const enum CBLAS_UPLO,\n                                        const enum CBLAS_TRANSPOSE, const enum CBLAS_DIAG,\n                                        const int m, const int n, const void* alpha, const void* a,\n                                        const int lda, void* b, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::lapacke::cblas_trmm<float>,\n      nm::math::lapacke::cblas_trmm<double>,\n      
cblas_ctrmm, cblas_ztrmm, // call directly, same function signature!\n      NULL\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation not yet defined for non-BLAS dtypes\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n\n    ttable[dtype](blas_order_sym(order), blas_side_sym(side), blas_uplo_sym(uplo), blas_transpose_sym(trans_a), blas_diag_sym(diag), FIX2INT(m), FIX2INT(n), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  return b;\n}\n\nstatic VALUE nm_lapacke_cblas_syrk(VALUE self,\n                           VALUE order,\n                           VALUE uplo,\n                           VALUE trans,\n                           VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n  static void (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const enum CBLAS_UPLO, const enum CBLAS_TRANSPOSE,\n                                        const int n, const int k, const void* alpha, const void* a,\n                                        const int lda, const void* beta, void* c, const int ldc) = {\n      NULL, NULL, NULL, NULL, NULL, // integers not allowed due to division\n      nm::math::lapacke::cblas_syrk<float>,\n      nm::math::lapacke::cblas_syrk<double>,\n      cblas_csyrk, cblas_zsyrk, // call directly, same function signature!\n      NULL\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    void *pAlpha = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]),\n         *pBeta = NM_ALLOCA_N(char, DTYPE_SIZES[dtype]);\n    rubyval_to_cval(alpha, dtype, pAlpha);\n    rubyval_to_cval(beta, dtype, 
pBeta);\n\n    ttable[dtype](blas_order_sym(order), blas_uplo_sym(uplo), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(k), pAlpha, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), pBeta, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n  }\n\n  return Qtrue;\n}\n\nstatic VALUE nm_lapacke_cblas_herk(VALUE self,\n                           VALUE order,\n                           VALUE uplo,\n                           VALUE trans,\n                           VALUE n, VALUE k,\n                           VALUE alpha,\n                           VALUE a, VALUE lda,\n                           VALUE beta,\n                           VALUE c, VALUE ldc)\n{\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (dtype == nm::COMPLEX64) {\n    cblas_cherk(blas_order_sym(order), blas_uplo_sym(uplo), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(k), NUM2DBL(alpha), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NUM2DBL(beta), NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n  } else if (dtype == nm::COMPLEX128) {\n    cblas_zherk(blas_order_sym(order), blas_uplo_sym(uplo), blas_transpose_sym(trans), FIX2INT(n), FIX2INT(k), NUM2DBL(alpha), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NUM2DBL(beta), NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n  } else\n    rb_raise(rb_eNotImpError, \"this matrix operation undefined for non-complex dtypes\");\n  return Qtrue;\n}\n\n/* Call any of the lapacke_xgetri functions as directly as possible.\n *\n * This version (the LAPACKE version) differs from the CLAPACK version in terms of the\n * input it expects (which is the output of getrf). See getrf for details.\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n *\n * Returns an array giving the pivot indices (normally these are argument #5).\n */\nstatic VALUE nm_lapacke_lapacke_getri(VALUE self, VALUE order, VALUE n, VALUE a, VALUE lda, VALUE ipiv) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const int n, void* a, const int lda, const int* ipiv) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_getri<float>,\n      nm::math::lapacke::lapacke_getri<double>,\n      nm::math::lapacke::lapacke_getri<nm::Complex64>,\n      nm::math::lapacke::lapacke_getri<nm::Complex128>,\n      NULL\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (!RB_TYPE_P(ipiv, T_ARRAY)) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n  } else {\n    ttable[NM_DTYPE(a)](blas_order_sym(order), FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), ipiv_);\n  }\n\n  return a;\n}\n\n/* Call any of the lapacke_xgetrf functions as directly as possible.\n *\n * The lapacke_getrf functions (dgetrf, sgetrf, cgetrf, and zgetrf) compute an LU factorization of a general M-by-N\n * matrix A using partial pivoting with row interchanges.\n *\n * The factorization has the form:\n *    A = P * L * U\n * where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n),\n * and U is upper triangular (upper trapezoidal if m < n).\n *\n * This version of getrf (the LAPACKE one) differs from the CLAPACK version. 
The CLAPACK has\n * different behavior for row-major matrices (the upper matrix has unit diagonals instead of\n * the lower and it uses column permutations instead of rows).\n *\n * This is the right-looking level 3 BLAS version of the algorithm.\n *\n * == Arguments\n * See: http://www.netlib.org/lapack/double/dgetrf.f\n * (You don't need argument 5; this is the value returned by this function.)\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n *\n * Returns an array giving the pivot indices (normally these are argument #5).\n */\nstatic VALUE nm_lapacke_lapacke_getrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, const int m, const int n, void* a, const int lda, int* ipiv) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_getrf<float>,\n      nm::math::lapacke::lapacke_getrf<double>,\n      nm::math::lapacke::lapacke_getrf<nm::Complex64>,\n      nm::math::lapacke::lapacke_getrf<nm::Complex128>,\n      NULL\n  };\n\n  int M = FIX2INT(m),\n      N = FIX2INT(n);\n\n  // Allocate the pivot index array, which is of size MIN(M, N).\n  size_t ipiv_size = std::min(M,N);\n  int* ipiv = NM_ALLOCA_N(int, ipiv_size);\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    ttable[NM_DTYPE(a)](blas_order_sym(order), M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), ipiv);\n  }\n\n  // Result will be stored in a. 
We return ipiv as an array.\n  VALUE ipiv_array = rb_ary_new2(ipiv_size);\n  for (size_t i = 0; i < ipiv_size; ++i) {\n    rb_ary_store(ipiv_array, i, INT2FIX(ipiv[i]));\n  }\n\n  return ipiv_array;\n}\n\n/*\n * Call any of the lapacke_xgetrs functions as directly as possible.\n */\nstatic VALUE nm_lapacke_lapacke_getrs(VALUE self, VALUE order, VALUE trans, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE ipiv, VALUE b, VALUE ldb) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER Order, char Trans, const int N,\n                                       const int NRHS, const void* A, const int lda, const int* ipiv, void* B,\n                                       const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_getrs<float>,\n      nm::math::lapacke::lapacke_getrs<double>,\n      nm::math::lapacke::lapacke_getrs<nm::Complex64>,\n      nm::math::lapacke::lapacke_getrs<nm::Complex128>,\n      NULL\n  };\n\n  // Allocate the C version of the pivot index array\n  int* ipiv_;\n  if (!RB_TYPE_P(ipiv, T_ARRAY)) {\n    rb_raise(rb_eArgError, \"ipiv must be of type Array\");\n  } else {\n    ipiv_ = NM_ALLOCA_N(int, RARRAY_LEN(ipiv));\n    for (int index = 0; index < RARRAY_LEN(ipiv); ++index) {\n      ipiv_[index] = FIX2INT( RARRAY_AREF(ipiv, index) );\n    }\n  }\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n    ttable[NM_DTYPE(a)](blas_order_sym(order), lapacke_transpose_sym(trans), FIX2INT(n), FIX2INT(nrhs), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n                        ipiv_, NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  // b is both returned and modified directly in the argument list.\n  return b;\n}\n\n/* Call any of the LAPACKE_xpotrf functions as directly as possible.\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! 
There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_lapacke_potrf(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda) {\n\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, char, const int n, void* a, const int lda) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_potrf<float>,\n      nm::math::lapacke::lapacke_potrf<double>,\n      nm::math::lapacke::lapacke_potrf<nm::Complex64>,\n      nm::math::lapacke::lapacke_potrf<nm::Complex128>,\n      NULL\n  };\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n  } else {\n    ttable[NM_DTYPE(a)](blas_order_sym(order), lapacke_uplo_sym(uplo), FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda));\n  }\n\n  return a;\n}\n\n/*\n * Call any of the LAPACKE_xpotrs functions as directly as possible.\n */\nstatic VALUE nm_lapacke_lapacke_potrs(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE nrhs, VALUE a, VALUE lda, VALUE b, VALUE ldb) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER Order, char Uplo, const int N,\n                                       const int NRHS, const void* A, const int lda, void* B, const int ldb) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_potrs<float>,\n      nm::math::lapacke::lapacke_potrs<double>,\n      nm::math::lapacke::lapacke_potrs<nm::Complex64>,\n      nm::math::lapacke::lapacke_potrs<nm::Complex128>,\n      NULL\n  };\n\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation undefined for integer matrices\");\n  } else {\n\n    ttable[NM_DTYPE(a)](blas_order_sym(order), lapacke_uplo_sym(uplo), FIX2INT(n), FIX2INT(nrhs), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n                        NM_STORAGE_DENSE(b)->elements, FIX2INT(ldb));\n  }\n\n  // b is both returned and modified directly in the argument list.\n  return b;\n}\n\n/* Call 
any of the lapacke_xpotri functions as directly as possible.\n *\n * This function does almost no type checking. Seriously, be really careful when you call it! There's no exception\n * handling, so you can easily crash Ruby!\n */\nstatic VALUE nm_lapacke_lapacke_potri(VALUE self, VALUE order, VALUE uplo, VALUE n, VALUE a, VALUE lda) {\n\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER, char, const int n, void* a, const int lda) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_potri<float>,\n      nm::math::lapacke::lapacke_potri<double>,\n      nm::math::lapacke::lapacke_potri<nm::Complex64>,\n      nm::math::lapacke::lapacke_potri<nm::Complex128>,\n      NULL\n  };\n\n  if (!ttable[NM_DTYPE(a)]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n  } else {\n    ttable[NM_DTYPE(a)](blas_order_sym(order), lapacke_uplo_sym(uplo), FIX2INT(n), NM_STORAGE_DENSE(a)->elements, FIX2INT(lda));\n  }\n\n  return a;\n}\n\n//badly need docs for gesvd, gesdd because of the real/complex mixing\n\n/*\n * xGESVD computes the singular value decomposition (SVD) of a real\n * M-by-N matrix A, optionally computing the left and/or right singular\n * vectors. The SVD is written\n *\n *      A = U * SIGMA * transpose(V)\n *\n * where SIGMA is an M-by-N matrix which is zero except for its\n * min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and\n * V is an N-by-N orthogonal matrix.  The diagonal elements of SIGMA\n * are the singular values of A; they are real and non-negative, and\n * are returned in descending order.  
The first min(m,n) columns of\n * U and V are the left and right singular vectors of A.\n *\n * Note that the routine returns V**T, not V.\n */\nstatic VALUE nm_lapacke_lapacke_gesvd(VALUE self, VALUE order, VALUE jobu, VALUE jobvt, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt, VALUE superb) {\n  static int (*gesvd_table[nm::NUM_DTYPES])(int, char, char, int, int, void* a, int, void* s, void* u, int, void* vt, int, void* superb) = {\n    NULL, NULL, NULL, NULL, NULL, // no integer ops\n    nm::math::lapacke::lapacke_gesvd<float,float>,\n    nm::math::lapacke::lapacke_gesvd<double,double>,\n    nm::math::lapacke::lapacke_gesvd<nm::Complex64,float>,\n    nm::math::lapacke::lapacke_gesvd<nm::Complex128,double>,\n    NULL // no Ruby objects\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n\n  if (!gesvd_table[dtype]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    return Qfalse;\n  } else {\n    int M = FIX2INT(m),\n        N = FIX2INT(n);\n\n    char JOBU = lapack_svd_job_sym(jobu),\n         JOBVT = lapack_svd_job_sym(jobvt);\n\n    int info = gesvd_table[dtype](blas_order_sym(order),JOBU, JOBVT, M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n      NM_STORAGE_DENSE(s)->elements, NM_STORAGE_DENSE(u)->elements, FIX2INT(ldu), NM_STORAGE_DENSE(vt)->elements, FIX2INT(ldvt),\n      NM_STORAGE_DENSE(superb)->elements);\n    return INT2FIX(info);\n  }\n}\n\nstatic VALUE nm_lapacke_lapacke_gesdd(VALUE self, VALUE order, VALUE jobz, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE s, VALUE u, VALUE ldu, VALUE vt, VALUE ldvt) {\n  static int (*gesdd_table[nm::NUM_DTYPES])(int, char, int, int, void* a, int, void* s, void* u, int, void* vt, int) = {\n    NULL, NULL, NULL, NULL, NULL, // no integer ops\n    nm::math::lapacke::lapacke_gesdd<float,float>,\n    nm::math::lapacke::lapacke_gesdd<double,double>,\n    nm::math::lapacke::lapacke_gesdd<nm::Complex64,float>,\n    
nm::math::lapacke::lapacke_gesdd<nm::Complex128,double>,\n    NULL // no Ruby objects\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n\n  if (!gesdd_table[dtype]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS dtypes\");\n    return Qfalse;\n  } else {\n    int M = FIX2INT(m),\n        N = FIX2INT(n);\n\n    char JOBZ = lapack_svd_job_sym(jobz);\n\n    int info = gesdd_table[dtype](blas_order_sym(order),JOBZ, M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda),\n      NM_STORAGE_DENSE(s)->elements, NM_STORAGE_DENSE(u)->elements, FIX2INT(ldu), NM_STORAGE_DENSE(vt)->elements, FIX2INT(ldvt));\n    return INT2FIX(info);\n  }\n}\n\n/*\n * GEEV computes for an N-by-N real nonsymmetric matrix A, the\n * eigenvalues and, optionally, the left and/or right eigenvectors.\n *\n * The right eigenvector v(j) of A satisfies\n *                    A * v(j) = lambda(j) * v(j)\n * where lambda(j) is its eigenvalue.\n *\n * The left eigenvector u(j) of A satisfies\n *                 u(j)**H * A = lambda(j) * u(j)**H\n * where u(j)**H denotes the conjugate transpose of u(j).\n *\n * The computed eigenvectors are normalized to have Euclidean norm\n * equal to 1 and largest component real.\n */\n//note on wi\nstatic VALUE nm_lapacke_lapacke_geev(VALUE self, VALUE order, VALUE jobvl, VALUE jobvr, VALUE n, VALUE a, VALUE lda, VALUE w, VALUE wi, VALUE vl, VALUE ldvl, VALUE vr, VALUE ldvr) {\n  static int (*geev_table[nm::NUM_DTYPES])(int, char, char, int, void* a, int, void* w, void* wi, void* vl, int, void* vr, int) = {\n    NULL, NULL, NULL, NULL, NULL, // no integer ops\n    nm::math::lapacke::lapacke_geev<float>,\n    nm::math::lapacke::lapacke_geev<double>,\n    nm::math::lapacke::lapacke_geev<nm::Complex64>,\n    nm::math::lapacke::lapacke_geev<nm::Complex128>,\n    NULL // no Ruby objects\n  };\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n\n  if (!geev_table[dtype]) {\n    rb_raise(rb_eNotImpError, \"this operation not yet implemented for non-BLAS 
dtypes\");\n    return Qfalse;\n  } else {\n    int N = FIX2INT(n);\n\n    char JOBVL = lapack_evd_job_sym(jobvl),\n         JOBVR = lapack_evd_job_sym(jobvr);\n\n    void* A  = NM_STORAGE_DENSE(a)->elements;\n    void* W = NM_STORAGE_DENSE(w)->elements;\n    void* WI = wi == Qnil ? NULL : NM_STORAGE_DENSE(wi)->elements; //For complex, wi should be nil\n    void* VL = JOBVL == 'V' ? NM_STORAGE_DENSE(vl)->elements : NULL;\n    void* VR = JOBVR == 'V' ? NM_STORAGE_DENSE(vr)->elements : NULL;\n\n    // Perform the actual calculation.\n    int info = geev_table[dtype](blas_order_sym(order), JOBVL, JOBVR, N, A, FIX2INT(lda), W, WI, VL, FIX2INT(ldvl), VR, FIX2INT(ldvr));\n\n    return INT2FIX(info);\n  }\n}\n\n/* \n * GEQRF calculates the QR factorization for an MxN real or complex matrix.\n *  \n * The QR factorization is A = QR, where Q is orthogonal and R is Upper Triangular\n * +A+ is overwritten with the elements of R and Q with Q being represented by the \n * elements below A's diagonal and an array of scalar factors in the output NMatrix. \n *\n * The matrix Q is represented as a product of elementary reflectors\n *     Q = H(1) H(2) . . . 
H(k), where k = min(m,n).\n *\n * Each H(i) has the form\n *\n *     H(i) = I - tau * v * v'\n *\n * http://www.netlib.org/lapack/explore-html/d3/d69/dgeqrf_8f.html\n */\n\nstatic VALUE nm_lapacke_lapacke_geqrf(VALUE self, VALUE order, VALUE m, VALUE n, VALUE a, VALUE lda, VALUE tau) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, void* tau) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_geqrf<float>,\n      nm::math::lapacke::lapacke_geqrf<double>,\n      nm::math::lapacke::lapacke_geqrf<nm::Complex64>,\n      nm::math::lapacke::lapacke_geqrf<nm::Complex128>,\n      NULL\n  };\n\n  int M = FIX2INT(m),\n      N = FIX2INT(n);\n \n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation is undefined for integer matrices\");\n    return Qfalse;\n  } else {\n    int info = ttable[dtype](blas_order_sym(order), M, N, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(tau)->elements);\n    return INT2FIX(info);\n  }\n}\n\n/* ORMQR calculates the orthogonal matrix Q from TAU and A after calling GEQRF on a real matrix\n *  \n *\n * The matrix Q is represented as a product of elementary reflectors\n *     Q = H(1) H(2) . . . 
H(k), where k = min(m,n).\n *\n * Each H(i) has the form\n *\n *     H(i) = I - tau * v * v'\n *  \n *  v is contained in the matrix passed to GEQRF     \n *\n *  www.netlib.org/lapack/explore-html/da/d82/dormqr_8f.html\n */\n\nstatic VALUE nm_lapacke_lapacke_ormqr(VALUE self, VALUE order, VALUE side, VALUE trans, VALUE m, VALUE n, VALUE k, VALUE a, VALUE lda, VALUE tau, VALUE c, VALUE ldc) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, void* a, const int lda, void* tau, void* c, const int ldc) = {\n      NULL, NULL, NULL, NULL, NULL,\n      nm::math::lapacke::lapacke_ormqr<float>,\n      nm::math::lapacke::lapacke_ormqr<double>,\n      NULL,NULL,NULL // no complex or Ruby objects\n  };\n\n  int M = FIX2INT(m),\n      N = FIX2INT(n),\n      K = FIX2INT(k); \n\n  char SIDE  = lapacke_side_sym(side),\n       TRANS = lapacke_transpose_sym(trans);\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation is undefined for integer matrices\");\n    return Qfalse;\n  } else {\n    int info = ttable[dtype](blas_order_sym(order), SIDE, TRANS, M, N, K, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(tau)->elements, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n    return INT2FIX(info);\n  }\n}\n\n/* UNMQR calculates the orthogonal matrix Q from TAU and A after calling GEQRF on a complex matrix.\n *  \n *\n * The matrix Q is represented as a product of elementary reflectors\n *     Q = H(1) H(2) . . . 
H(k), where k = min(m,n).\n *\n * Each H(i) has the form\n *\n *     H(i) = I - tau * v * v'\n *  \n *  v is contained in the matrix passed to GEQRF     \n *\n *  http://www.netlib.org/lapack/explore-html/d5/d65/zunmqr_8f.html\n */\n\nstatic VALUE nm_lapacke_lapacke_unmqr(VALUE self, VALUE order, VALUE side, VALUE trans, VALUE m, VALUE n, VALUE k, VALUE a, VALUE lda, VALUE tau, VALUE c, VALUE ldc) {\n  static int (*ttable[nm::NUM_DTYPES])(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, void* a, const int lda, void* tau, void* c, const int ldc) = {\n      NULL, NULL, NULL, NULL, NULL,NULL,NULL, // no non-complex ops\n      nm::math::lapacke::lapacke_unmqr<nm::Complex64>,\n      nm::math::lapacke::lapacke_unmqr<nm::Complex128>,\n      NULL // no Ruby objects\n  };\n\n  int M = FIX2INT(m),\n      N = FIX2INT(n),\n      K = FIX2INT(k); \n\n  char SIDE  = lapacke_side_sym(side),\n       TRANS = lapacke_transpose_sym(trans);\n\n  nm::dtype_t dtype = NM_DTYPE(a);\n\n  if (!ttable[dtype]) {\n    rb_raise(nm_eDataTypeError, \"this matrix operation is valid only for complex datatypes\");\n    return Qfalse;\n  } else {\n    int info = ttable[dtype](blas_order_sym(order), SIDE, TRANS, M, N, K, NM_STORAGE_DENSE(a)->elements, FIX2INT(lda), NM_STORAGE_DENSE(tau)->elements, NM_STORAGE_DENSE(c)->elements, FIX2INT(ldc));\n    return INT2FIX(info);\n  }\n}\n\n}\n"
  },
  {
    "path": "ext/nmatrix_lapacke/nmatrix_lapacke.cpp",
    "content": "/////////////////////////////////////////////////////////////////////\n// = NMatrix\n//\n// A linear algebra library for scientific computation in Ruby.\n// NMatrix is part of SciRuby.\n//\n// NMatrix was originally inspired by and derived from NArray, by\n// Masahiro Tanaka: http://narray.rubyforge.org\n//\n// == Copyright Information\n//\n// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n//\n// Please see LICENSE.txt for additional copyright notices.\n//\n// == Contributing\n//\n// By contributing source code to SciRuby, you agree to be bound by\n// our Contributor Agreement:\n//\n// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n//\n// == nmatrix_lapacke.cpp\n//\n// Main file for nmatrix_lapacke extension\n//\n\n#include <ruby.h>\n\n#include \"nmatrix.h\"\n\n#include \"data/data.h\"\n\nextern \"C\" {\nvoid nm_math_init_lapack(); \n\nvoid Init_nmatrix_lapacke() {\n  nm_math_init_lapack();\n}\n\n}\n"
  },
  {
    "path": "lib/nmatrix/atlas.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == atlas.rb\n#\n# ruby file for the nmatrix-atlas gem. Loads the C extension and defines\n# nice ruby interfaces for ATLAS functions.\n#++\n\nrequire 'nmatrix/nmatrix.rb'\n #need to have nmatrix required first or else bad things will happen\nrequire_relative 'lapack_ext_common'\n\nNMatrix.register_lapack_extension(\"nmatrix-atlas\")\n\nrequire \"nmatrix_atlas.so\"\n\nclass NMatrix\n\n  #Add functions from the ATLAS C extension to the main LAPACK and BLAS modules.\n  #This will overwrite the original functions where applicable.\n  module LAPACK\n    class << self\n      NMatrix::ATLAS::LAPACK.singleton_methods.each do |m|\n        define_method m, NMatrix::ATLAS::LAPACK.method(m).to_proc\n      end\n    end\n  end\n\n  module BLAS\n    class << self\n      NMatrix::ATLAS::BLAS.singleton_methods.each do |m|\n        define_method m, NMatrix::ATLAS::BLAS.method(m).to_proc\n      end\n    end\n  end\n\n  module LAPACK\n    class << self\n      def posv(uplo, a, b)\n        raise(ShapeError, \"a must be square\") unless a.dim == 2 \\\n         && a.shape[0] == a.shape[1]\n\n        raise(ShapeError, \"number of rows of b must equal number of cols of a\") \\\n         unless a.shape[1] == b.shape[0]\n\n        raise(StorageTypeError, \"only works with dense matrices\") \\\n         unless 
a.stype == :dense && b.stype == :dense\n\n        raise(DataTypeError, \"only works for non-integer, non-object dtypes\") \\\n         if  a.integer_dtype? || a.object_dtype? || \\\n          b.integer_dtype? || b.object_dtype?\n\n        x     = b.clone\n        clone = a.clone\n        n = a.shape[0]\n        nrhs = b.shape[1]\n        clapack_potrf(:row, uplo, n, clone, n)\n        # Must transpose b before and after:\n        #  http://math-atlas.sourceforge.net/faq.html#RowSolve\n        x = x.transpose\n        clapack_potrs(:row, uplo, n, nrhs, clone, n, x, n)\n        x.transpose\n      end\n\n      def geev(matrix, which=:both)\n        raise(StorageTypeError, \"LAPACK functions only work on dense matrices\") \\\n         unless matrix.dense?\n\n        raise(ShapeError, \"eigenvalues can only be computed for square matrices\") \\\n         unless matrix.dim == 2 && matrix.shape[0] == matrix.shape[1]\n\n        jobvl = (which == :both || which == :left) ? :t : false\n        jobvr = (which == :both || which == :right) ? :t : false\n\n        n = matrix.shape[0]\n\n        # Outputs\n        eigenvalues = NMatrix.new([n, 1], dtype: matrix.dtype)\n         # For real dtypes this holds only the real part of the eigenvalues.\n        imag_eigenvalues = matrix.complex_dtype? ? nil : NMatrix.new([n, 1], \\\n         dtype: matrix.dtype) # For complex dtypes, this is unused.\n        left_output      = jobvl ? matrix.clone_structure : nil\n        right_output     = jobvr ? matrix.clone_structure : nil\n\n        # lapack_geev is a pure LAPACK routine so it expects column-major matrices,\n        # so we need to transpose the input as well as the output.\n        temporary_matrix = matrix.transpose\n        NMatrix::LAPACK::lapack_geev(jobvl, # compute left eigenvectors of A?\n                                     jobvr, # compute right eigenvectors of A? 
(left eigenvectors of A**T)\n                                     n, # order of the matrix\n                                     temporary_matrix,# input matrix (used as work)\n                                     n, # leading dimension of matrix\n                                     eigenvalues,# real part of computed eigenvalues\n                                     imag_eigenvalues,# imag part of computed eigenvalues\n                                     left_output,     # left eigenvectors, if applicable\n                                     n, # leading dimension of left_output\n                                     right_output,    # right eigenvectors, if applicable\n                                     n, # leading dimension of right_output\n                                     2*n)\n        left_output = left_output.transpose if jobvl\n        right_output = right_output.transpose if jobvr\n\n\n        # For real dtypes, transform left_output and right_output into correct forms.\n        # If the j'th and the (j+1)'th eigenvalues form a complex conjugate\n        # pair, then the j'th and (j+1)'th columns of the matrix are\n        # the real and imag parts of the eigenvector corresponding\n        # to the j'th eigenvalue.\n        if !matrix.complex_dtype?\n          complex_indices = []\n          n.times do |i|\n            complex_indices << i if imag_eigenvalues[i] != 0.0\n          end\n\n          if !complex_indices.empty?\n            # For real dtypes, put the real and imaginary parts together\n            eigenvalues = eigenvalues + imag_eigenvalues * \\\n             Complex(0.0,1.0)\n            left_output = left_output.cast(dtype: \\\n             NMatrix.upcast(:complex64, matrix.dtype)) if left_output\n            right_output = right_output.cast(dtype: NMatrix.upcast(:complex64, \\\n             matrix.dtype)) if right_output\n          end\n\n          complex_indices.each_slice(2) do |i, _|\n            if right_output\n              
right_output[0...n,i] = right_output[0...n,i] + \\\n               right_output[0...n,i+1] * Complex(0.0,1.0)\n              right_output[0...n,i+1] = \\\n               right_output[0...n,i].complex_conjugate\n            end\n\n            if left_output\n              left_output[0...n,i] = left_output[0...n,i] + \\\n               left_output[0...n,i+1] * Complex(0.0,1.0)\n              left_output[0...n,i+1] = left_output[0...n,i].complex_conjugate\n            end\n          end\n        end\n\n        if which == :both\n          return [eigenvalues, left_output, right_output]\n        elsif which == :left\n          return [eigenvalues, left_output]\n        else\n          return [eigenvalues, right_output]\n        end\n      end\n\n      def gesvd(matrix, workspace_size=1)\n        result = alloc_svd_result(matrix)\n\n        m = matrix.shape[0]\n        n = matrix.shape[1]\n\n        # This is a pure LAPACK function so it expects column-major functions.\n        # So we need to transpose the input as well as the output.\n        matrix = matrix.transpose\n        NMatrix::LAPACK::lapack_gesvd(:a, :a, m, n, matrix, \\\n         m, result[1], result[0], m, result[2], n, workspace_size)\n        result[0] = result[0].transpose\n        result[2] = result[2].transpose\n        result\n      end\n\n      def gesdd(matrix, workspace_size=nil)\n        min_workspace_size = matrix.shape.min * \\\n         (6 + 4 * matrix.shape.min) + matrix.shape.max\n        workspace_size = min_workspace_size if \\\n         workspace_size.nil? 
|| workspace_size < min_workspace_size\n\n        result = alloc_svd_result(matrix)\n\n        m = matrix.shape[0]\n        n = matrix.shape[1]\n\n        # This is a pure LAPACK function so it expects column-major functions.\n        # So we need to transpose the input as well as the output.\n        matrix = matrix.transpose\n        NMatrix::LAPACK::lapack_gesdd(:a, m, n, matrix, m, result[1], \\\n         result[0], m, result[2], n, workspace_size)\n        result[0] = result[0].transpose\n        result[2] = result[2].transpose\n        result\n      end\n    end\n  end\n\n  def invert!\n    raise(StorageTypeError, \"invert only works on dense matrices currently\") \\\n     unless self.dense?\n\n    raise(ShapeError, \"Cannot invert non-square matrix\") \\\n     unless shape[0] == shape[1]\n\n    raise(DataTypeError, \"Cannot invert an integer matrix in-place\") \\\n     if self.integer_dtype?\n\n    # Even though we are using the ATLAS plugin, we still might be missing\n    # CLAPACK (and thus clapack_getri) if we are on OS X.\n    if NMatrix.has_clapack?\n      # Get the pivot array; factor the matrix\n      # We can't used getrf! here since it doesn't have the clapack behavior,\n      # so it doesn't play nicely with clapack_getri\n      n = self.shape[0]\n      pivot = NMatrix::LAPACK::clapack_getrf(:row, n, n, self, n)\n      # Now calculate the inverse using the pivot array\n      NMatrix::LAPACK::clapack_getri(:row, n, self, n, pivot)\n      self\n    else\n      __inverse__(self,true)\n    end\n  end\n\n  def potrf!(which)\n    raise(StorageTypeError, \"ATLAS functions only work on dense matrices\") \\\n     unless self.dense?\n    raise(ShapeError, \"Cholesky decomposition only valid for square matrices\") \\\n     unless self.dim == 2 && self.shape[0] == self.shape[1]\n\n    NMatrix::LAPACK::clapack_potrf(:row, which, self.shape[0], self, self.shape[1])\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/blas.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == blas.rb\n#\n# This file contains the safer accessors for the BLAS functions\n# supported by NMatrix.\n#++\n\nmodule NMatrix::BLAS\n\n  #Add functions from C extension to main BLAS module\n  class << self\n    if jruby?\n      # BLAS functionalities for JRuby need to be implemented\n    else\n      NMatrix::Internal::BLAS.singleton_methods.each do |m|\n        define_method m, NMatrix::Internal::BLAS.method(m).to_proc\n      end\n    end\n  end\n\n  class << self\n    #\n    # call-seq:\n    #     gemm(a, b) -> NMatrix\n    #     gemm(a, b, c) -> NMatrix\n    #     gemm(a, b, c, alpha, beta) -> NMatrix\n    #\n    # Updates the value of C via the matrix multiplication\n    #   C = (alpha * A * B) + (beta * C)\n    # where +alpha+ and +beta+ are scalar values.\n    #\n    # * *Arguments* :\n    #   - +a+ -> Matrix A.\n    #   - +b+ -> Matrix B.\n    #   - +c+ -> Matrix C.\n    #   - +alpha+ -> A scalar value that multiplies A * B.\n    #   - +beta+ -> A scalar value that multiplies C.\n    #   - +transpose_a+ ->\n    #   - +transpose_b+ ->\n    #   - +m+ ->\n    #   - +n+ ->\n    #   - +k+ ->\n    #   - +lda+ ->\n    #   - +ldb+ ->\n    #   - +ldc+ ->\n    # * *Returns* :\n    #   - A NMatrix equal to (alpha * A * B) + (beta * C).\n    # * *Raises* :\n    #   - +ArgumentError+ -> +a+ 
and +b+ must be dense matrices.\n    #   - +ArgumentError+ -> +c+ must be +nil+ or a dense matrix.\n    #   - +ArgumentError+ -> The dtype of the matrices must be equal.\n    #\n    def gemm(a, b, c = nil, alpha = 1.0, beta = 0.0,\n             transpose_a = false, transpose_b = false, m = nil,\n             n = nil, k = nil, lda = nil, ldb = nil, ldc = nil)\n\n      raise(ArgumentError, 'Expected dense NMatrices as first two arguments.') \\\n            unless a.is_a?(NMatrix) and b.is_a? \\\n            (NMatrix) and a.stype == :dense and b.stype == :dense\n\n      raise(ArgumentError, 'Expected nil or dense NMatrix as third argument.') \\\n            unless c.nil? or (c.is_a?(NMatrix)  \\\n            and c.stype == :dense)\n      raise(ArgumentError, 'NMatrix dtype mismatch.') \\\n            unless a.dtype == b.dtype and (c ? a.dtype == c.dtype : true)\n\n      # First, set m, n, and k, which depend on whether we're taking the\n      # transpose of a and b.\n      if c\n        m ||= c.shape[0]\n        n ||= c.shape[1]\n        k ||= transpose_a ? a.shape[0] : a.shape[1]\n\n      else\n        if transpose_a\n          # Either :transpose or :complex_conjugate.\n          m ||= a.shape[1]\n          k ||= a.shape[0]\n\n        else\n          # No transpose.\n          m ||= a.shape[0]\n          k ||= a.shape[1]\n        end\n\n        n ||= transpose_b ? 
b.shape[0] : b.shape[1]\n        c  = NMatrix.new([m, n], dtype: a.dtype)\n      end\n\n      # I think these are independent of whether or not a transpose occurs.\n      lda ||= a.shape[1]\n      ldb ||= b.shape[1]\n      ldc ||= c.shape[1]\n\n      # NM_COMPLEX64 and NM_COMPLEX128 both require complex alpha and beta.\n      if a.dtype == :complex64 or a.dtype == :complex128\n        alpha = Complex(1.0, 0.0) if alpha == 1.0\n        beta  = Complex(0.0, 0.0) if beta  == 0.0\n      end\n\n      # For argument descriptions, see: http://www.netlib.org/blas/dgemm.f\n      ::NMatrix::BLAS.cblas_gemm(:row, transpose_a, transpose_b,\n       m, n, k, alpha, a, lda, b, ldb, beta, c, ldc)\n\n      return c\n    end\n\n    #\n    # call-seq:\n    #     gemv(a, x) -> NMatrix\n    #     gemv(a, x, y) -> NMatrix\n    #     gemv(a, x, y, alpha, beta) -> NMatrix\n    #\n    # Implements matrix-vector product via\n    #   y = (alpha * A * x) + (beta * y)\n    # where +alpha+ and +beta+ are scalar values.\n    #\n    # * *Arguments* :\n    #   - +a+ -> Matrix A.\n    #   - +x+ -> Vector x.\n    #   - +y+ -> Vector y.\n    #   - +alpha+ -> A scalar value that multiplies A * x.\n    #   - +beta+ -> A scalar value that multiplies y.\n    #   - +transpose_a+ ->\n    #   - +m+ ->\n    #   - +n+ ->\n    #   - +lda+ ->\n    #   - +incx+ ->\n    #   - +incy+ ->\n    # * *Returns* :\n    #   -\n    # * *Raises* :\n    #   - ++ ->\n    #\n    def gemv(a, x, y = nil, alpha = 1.0, beta = 0.0,\n             transpose_a = false, m = nil, n = nil, lda = nil,\n             incx = nil, incy = nil)\n      raise(ArgumentError, 'Expected dense NMatrices as first two arguments.') \\\n       unless a.is_a?(NMatrix) and x.is_a?(NMatrix) and \\\n       a.stype == :dense and x.stype == :dense\n\n      raise(ArgumentError, 'Expected nil or dense NMatrix as third argument.') \\\n       unless y.nil? 
or (y.is_a?(NMatrix) and y.stype == :dense)\n\n      raise(ArgumentError, 'NMatrix dtype mismatch.') \\\n       unless a.dtype == x.dtype and (y ? a.dtype == y.dtype : true)\n\n      m ||= transpose_a == :transpose ? a.shape[1] : a.shape[0]\n      n ||= transpose_a == :transpose ? a.shape[0] : a.shape[1]\n      raise(ArgumentError, \"dimensions don't match\") \\\n       unless x.shape[0] == n && x.shape[1] == 1\n\n      if y\n        raise(ArgumentError, \"dimensions don't match\") \\\n         unless y.shape[0] == m && y.shape[1] == 1\n      else\n        y = NMatrix.new([m,1], dtype: a.dtype)\n      end\n\n      lda  ||= a.shape[1]\n      incx ||= 1\n      incy ||= 1\n\n      ::NMatrix::BLAS.cblas_gemv(transpose_a, m, n,\n       alpha, a, lda, x, incx, beta, y, incy)\n\n      return y\n    end\n\n    #\n    # call-seq:\n    #     rot(x, y, c, s) -> [NMatrix, NMatrix]\n    #\n    # Apply plane rotation.\n    #\n    # * *Arguments* :\n    #   - +x+ -> NMatrix\n    #   - +y+ -> NMatrix\n    #   - +c+ -> cosine of the angle of rotation\n    #   - +s+ -> sine of the angle of rotation\n    #   - +incx+ -> stride of NMatrix +x+\n    #   - +incy+ -> stride of NMatrix +y+\n    #   - +n+ -> number of elements to consider in x and y\n    #   - +in_place+ -> true   if it's okay to modify the supplied\n    #                           +x+ and +y+ parameters directly;\n    #                   false if not. 
Default is false.\n    # * *Returns* :\n    #   - Array with the results, in the format [xx, yy]\n    # * *Raises* :\n    #   - +ArgumentError+ -> Expected dense NMatrices as first two arguments.\n    #   - +ArgumentError+ -> NMatrix dtype mismatch.\n    #   - +ArgumentError+ -> Need to supply n for non-standard incx,\n    #                         incy values.\n    #\n    def rot(x, y, c, s, incx = 1, incy = 1, n = nil, in_place=false)\n      raise(ArgumentError, 'Expected dense NMatrices as first two arguments.') \\\n       unless x.is_a?(NMatrix) and y.is_a?(NMatrix) \\\n       and x.stype == :dense and y.stype == :dense\n\n      raise(ArgumentError, 'NMatrix dtype mismatch.') \\\n       unless x.dtype == y.dtype\n\n      raise(ArgumentError, 'Need to supply n for non-standard incx, incy values') \\\n       if n.nil? && incx != 1 && incx != -1 && incy != 1 && incy != -1\n\n      n ||= [x.size/incx.abs, y.size/incy.abs].min\n\n      if in_place\n        ::NMatrix::BLAS.cblas_rot(n, x, incx, y, incy, c, s)\n        return [x,y]\n      else\n        xx = x.clone\n        yy = y.clone\n\n        ::NMatrix::BLAS.cblas_rot(n, xx, incx, yy, incy, c, s)\n\n        return [xx,yy]\n      end\n    end\n\n\n    #\n    # call-seq:\n    #     rot!(x, y, c, s) -> [NMatrix, NMatrix]\n    #\n    # Apply plane rotation directly to +x+ and +y+.\n    #\n    # See rot for arguments.\n    def rot!(x, y, c, s, incx = 1, incy = 1, n = nil)\n      rot(x,y,c,s,incx,incy,n,true)\n    end\n\n\n    #\n    # call-seq:\n    #     rotg(ab) -> [Numeric, Numeric]\n    #\n    # Apply givens plane rotation to the coordinates (a,b),\n    #  returning the cosine and sine of the angle theta.\n    #\n    # Since the givens rotation includes a square root,\n    #  integers are disallowed.\n    #\n    # * *Arguments* :\n    #   - +ab+ -> NMatrix with two elements\n    # * *Returns* :\n    #   - Array with the results, in the format [cos(theta), sin(theta)]\n    # * *Raises* :\n    #   - +ArgumentError+ 
-> Expected dense NMatrix of size 2\n    #\n    def rotg(ab)\n      raise(ArgumentError, \"Expected dense NMatrix of shape [2,1] or [1,2]\") \\\n       unless ab.is_a?(NMatrix) && ab.stype == :dense && ab.size == 2\n\n      ::NMatrix::BLAS.cblas_rotg(ab)\n    end\n\n\n    #\n    # call-seq:\n    #     asum(x, incx, n) -> Numeric\n    #\n    # Calculate the sum of absolute values of the entries of a\n    #  vector +x+ of size +n+\n    #\n    # * *Arguments* :\n    #   - +x+ -> an NMatrix (will also allow an NMatrix,\n    #             but will treat it as if it's a vector )\n    #   - +incx+ -> the skip size (defaults to 1)\n    #   - +n+ -> the size of +x+ (defaults to +x.size / incx+)\n    # * *Returns* :\n    #   - The sum\n    # * *Raises* :\n    #   - +ArgumentError+ -> Expected dense NMatrix for arg 0\n    #   - +RangeError+ -> n out of range\n    #\n    def asum(x, incx = 1, n = nil)\n      n ||= x.size / incx\n      raise(ArgumentError, \"Expected dense NMatrix for arg 0\") \\\n       unless x.is_a?(NMatrix)\n\n      raise(RangeError, \"n out of range\") \\\n       if n*incx > x.size || n*incx <= 0 || n <= 0\n       ::NMatrix::BLAS.cblas_asum(n, x, incx)\n    end\n\n    #\n    # call-seq:\n    #     nrm2(x, incx, n)\n    #\n    # Calculate the 2-norm of a vector +x+ of size +n+\n    #\n    # * *Arguments* :\n    #   - +x+ -> an NMatrix (will also allow an\n    #             NMatrix, but will treat it as if it's a vector )\n    #   - +incx+ -> the skip size (defaults to 1)\n    #   - +n+ -> the size of +x+ (defaults to +x.size / incx+)\n    # * *Returns* :\n    #   - The 2-norm\n    # * *Raises* :\n    #   - +ArgumentError+ -> Expected dense NMatrix for arg 0\n    #   - +RangeError+ -> n out of range\n    #\n    def nrm2(x, incx = 1, n = nil)\n      n ||= x.size / incx\n      raise(ArgumentError, \"Expected dense NMatrix for arg 0\") \\\n       unless x.is_a?(NMatrix)\n\n      raise(RangeError, \"n out of range\") \\\n       if n*incx > x.size || n*incx <= 0 
|| n <= 0\n       ::NMatrix::BLAS.cblas_nrm2(n, x, incx)\n    end\n\n    #\n    # call-seq:\n    #     scal(alpha, vector, incx, n)\n    #\n    # Scale a matrix by a given scaling factor\n    #\n    # * *Arguments* :\n    #   - +alpha+ -> a scaling factor\n    #   - +vector+ -> an NMatrix\n    #   - +incx+ -> the skip size (defaults to 1)\n    #   - +n+ -> the size of +x+ (defaults to +x.size / incx+)\n    # * *Returns* :\n    #   - The scaling result\n    # * *Raises* :\n    #   - +ArgumentError+ -> Expected dense NMatrix for arg 0\n    #   - +RangeError+ -> n out of range\n    #\n    def scal(alpha, vector, incx=1, n=nil)\n      n ||= vector.size / incx\n      raise(ArgumentError, \"Expected dense NMatrix for arg 0\") unless vector.is_a?(NMatrix)\n      raise(RangeError, \"n out of range\") if n*incx > vector.size || n*incx <= 0 || n <= 0\n      ::NMatrix::BLAS.cblas_scal(n, alpha, vector, incx)\n    end\n\n    # The following are functions that used to be implemented in C, but\n    # now require nmatrix-atlas or nmatrix-lapcke to run properly, so we can just\n    # implemented their stubs in Ruby.\n    def cblas_trmm(order, side, uplo, trans_a, diag, m, n, alpha, a, lda, b, ldb)\n      raise(NotImplementedError,\"cblas_trmm requires either the\n       nmatrix-lapacke or nmatrix-atlas gem\")\n    end\n\n    def cblas_syrk(order, uplo, trans, n, k, alpha, a, lda, beta, c, ldc)\n      raise(NotImplementedError,\"cblas_syrk requires either the\n       nmatrix-lapacke or nmatrix-atlas gem\")\n    end\n\n    def cblas_herk(order, uplo, trans, n, k, alpha, a, lda, beta, c, ldc)\n      raise(NotImplementedError,\"cblas_herk requires either the\n       nmatrix-lapacke or nmatrix-atlas gem\")\n    end\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/cruby/math.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == math.rb\n#\n# Math functionality for NMatrix, along with any NMatrix instance\n# methods that correspond to ATLAS/BLAS/LAPACK functions (e.g.,\n# laswp).\n#++\n\nclass NMatrix\n\n  #\n  # call-seq:\n  #     getrf! -> Array\n  #\n  # LU factorization of a general M-by-N matrix +A+ using partial pivoting with\n  # row interchanges. The LU factorization is A = PLU, where P is a row permutation\n  # matrix, L is a lower triangular matrix with unit diagonals, and U is an upper\n  # triangular matrix (note that this convention is different from the\n  # clapack_getrf behavior, but matches the standard LAPACK getrf).\n  # +A+ is overwritten with the elements of L and U (the unit\n  # diagonal elements of L are not saved). P is not returned directly and must be\n  # constructed from the pivot array ipiv. The row indices in ipiv are indexed\n  # starting from 1.\n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - The IPIV vector. 
The L and U matrices are stored in A.\n  # * *Raises* :\n  #   - +StorageTypeError+ -> ATLAS functions only work on dense matrices.\n  #\n  def getrf!\n    raise(StorageTypeError, \"ATLAS functions only work on dense matrices\") unless self.dense?\n\n    #For row-major matrices, clapack_getrf uses a different convention than\n    #described above (U has unit diagonal elements instead of L and columns\n    #are interchanged rather than rows). For column-major matrices, clapack\n    #uses the stanard conventions. So we just transpose the matrix before\n    #and after calling clapack_getrf.\n    #Unfortunately, this is not a very good way, uses a lot of memory.\n    temp = self.transpose\n    ipiv = NMatrix::LAPACK::clapack_getrf(:col, self.shape[0], self.shape[1], temp, self.shape[0])\n    temp = temp.transpose\n    self[0...self.shape[0], 0...self.shape[1]] = temp\n\n    #for some reason, in clapack_getrf, the indices in ipiv start from 0\n    #instead of 1 as in LAPACK.\n    ipiv.each_index { |i| ipiv[i]+=1 }\n\n    return ipiv\n  end\n\n  #\n  # call-seq:\n  #     geqrf! -> shape.min x 1 NMatrix\n  #\n  # QR factorization of a general M-by-N matrix +A+.\n  #\n  # The QR factorization is A = QR, where Q is orthogonal and R is Upper Triangular\n  # +A+ is overwritten with the elements of R and Q with Q being represented by the\n  # elements below A's diagonal and an array of scalar factors in the output NMatrix.\n  #\n  # The matrix Q is represented as a product of elementary reflectors\n  #     Q = H(1) H(2) . . . H(k), where k = min(m,n).\n  #\n  # Each H(i) has the form\n  #\n  #     H(i) = I - tau * v * v'\n  #\n  # http://www.netlib.org/lapack/explore-html/d3/d69/dgeqrf_8f.html\n  #\n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - Vector TAU. Q and R are stored in A. 
Q is represented by TAU and A\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #\n  def geqrf!\n    # The real implementation is in lib/nmatrix/lapacke.rb\n    raise(NotImplementedError, \"geqrf! requires the nmatrix-lapacke gem\")\n  end\n\n  #\n  # call-seq:\n  #     ormqr(tau) -> NMatrix\n  #     ormqr(tau, side, transpose, c) -> NMatrix\n  #\n  # Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization.\n  # +c+ is overwritten with the elements of the result NMatrix if supplied. Q is the orthogonal matrix\n  # represented by tau and the calling NMatrix\n  #\n  # Only works on float types, use unmqr for complex types.\n  #\n  # == Arguments\n  #\n  # * +tau+ - vector containing scalar factors of elementary reflectors\n  # * +side+ - direction of multiplication [:left, :right]\n  # * +transpose+ - apply Q with or without transpose [false, :transpose]\n  # * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity\n  #\n  # * *Returns* :\n  #\n  #   - Q * c or c * Q Where Q may be transposed before multiplication.\n  #\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #   - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types\n  #   - +TypeError+ -> c must have the same dtype as the calling NMatrix\n  #\n  def ormqr(tau, side=:left, transpose=false, c=nil)\n    # The real implementation is in lib/nmatrix/lapacke.rb\n    raise(NotImplementedError, \"ormqr requires the nmatrix-lapacke gem\")\n\n  end\n\n  #\n  # call-seq:\n  #     unmqr(tau) -> NMatrix\n  #     unmqr(tau, side, transpose, c) -> NMatrix\n  #\n  # Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization.\n  # +c+ is overwritten with the elements of the result NMatrix if it is supplied. 
Q is the orthogonal matrix\n  # represented by tau and the calling NMatrix\n  #\n  # Only works on complex types, use ormqr for float types.\n  #\n  # == Arguments\n  #\n  # * +tau+ - vector containing scalar factors of elementary reflectors\n  # * +side+ - direction of multiplication [:left, :right]\n  # * +transpose+ - apply Q as Q or its complex conjugate [false, :complex_conjugate]\n  # * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity\n  #\n  # * *Returns* :\n  #\n  #   - Q * c or c * Q Where Q may be transformed to its complex conjugate before multiplication.\n  #\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #   - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types\n  #   - +TypeError+ -> c must have the same dtype as the calling NMatrix\n  #\n  def unmqr(tau, side=:left, transpose=false, c=nil)\n    # The real implementation is in lib/nmatrix/lapacke.rb\n    raise(NotImplementedError, \"unmqr requires the nmatrix-lapacke gem\")\n  end\n\n  #\n  # call-seq:\n  #     potrf!(upper_or_lower) -> NMatrix\n  #\n  # Cholesky factorization of a symmetric positive-definite matrix -- or, if complex,\n  # a Hermitian positive-definite matrix +A+.\n  # The result will be written in either the upper or lower triangular portion of the\n  # matrix, depending on whether the argument is +:upper+ or +:lower+.\n  # Also the function only reads in the upper or lower part of the matrix,\n  # so it doesn't actually have to be symmetric/Hermitian.\n  # However, if the matrix (i.e. 
the symmetric matrix implied by the lower/upper\n  # half) is not positive-definite, the function will return nonsense.\n  #\n  # This functions requires either the nmatrix-atlas or nmatrix-lapacke gem\n  # installed.\n  #\n  # * *Returns* :\n  #   the triangular portion specified by the parameter\n  # * *Raises* :\n  #   - +StorageTypeError+ -> ATLAS functions only work on dense matrices.\n  #   - +ShapeError+ -> Must be square.\n  #   - +NotImplementedError+ -> If called without nmatrix-atlas or nmatrix-lapacke gem\n  #\n  def potrf!(which)\n    # The real implementation is in the plugin files.\n    raise(NotImplementedError, \"potrf! requires either the nmatrix-atlas or nmatrix-lapacke gem\")\n  end\n\n  def potrf_upper!\n    potrf! :upper\n  end\n\n  def potrf_lower!\n    potrf! :lower\n  end\n\n\n  #\n  # call-seq:\n  #     factorize_cholesky -> [upper NMatrix, lower NMatrix]\n  #\n  # Calculates the Cholesky factorization of a matrix and returns the\n  # upper and lower matrices such that A=LU and L=U*, where * is\n  # either the transpose or conjugate transpose.\n  #\n  # Unlike potrf!, this makes method requires that the original is matrix is\n  # symmetric or Hermitian. However, it is still your responsibility to make\n  # sure it is positive-definite.\n  def factorize_cholesky\n    raise \"Matrix must be symmetric/Hermitian for Cholesky factorization\" unless self.hermitian?\n    l = self.clone.potrf_lower!.tril!\n    u = l.conjugate_transpose\n    [u,l]\n  end\n\n  #\n  # call-seq:\n  #     factorize_lu -> ...\n  #\n  # LU factorization of a matrix. 
Optionally return the permutation matrix.\n  #   Note that computing the permutation matrix will introduce a slight memory\n  #   and time overhead.\n  #\n  # == Arguments\n  #\n  # +with_permutation_matrix+ - If set to *true* will return the permutation\n  #   matrix alongwith the LU factorization as a second return value.\n  #\n  def factorize_lu with_permutation_matrix=nil\n    raise(NotImplementedError, \"only implemented for dense storage\") unless self.stype == :dense\n    raise(NotImplementedError, \"matrix is not 2-dimensional\") unless self.dimensions == 2\n\n    t     = self.clone\n    pivot = t.getrf!\n    return t unless with_permutation_matrix\n\n    [t, FactorizeLUMethods.permutation_matrix_from(pivot)]\n  end\n\n  #\n  # call-seq:\n  #     factorize_qr -> [Q,R]\n  #\n  # QR factorization of a matrix without column pivoting.\n  # Q is orthogonal and R is upper triangular if input is square or upper trapezoidal if\n  # input is rectangular.\n  #\n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - Array containing Q and R matrices\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented for desnse storage.\n  #   - +ShapeError+ -> Input must be a 2-dimensional matrix to have a QR decomposition.\n  #\n  def factorize_qr\n    raise(NotImplementedError, \"only implemented for dense storage\") unless self.stype == :dense\n    raise(ShapeError, \"Input must be a 2-dimensional matrix to have a QR decomposition\") unless self.dim == 2\n\n    rows, columns = self.shape\n    r = self.clone\n    tau =  r.geqrf!\n\n    #Obtain Q\n    q = self.complex_dtype? ? 
r.unmqr(tau) : r.ormqr(tau)\n\n    #Obtain R\n    if rows <= columns\n      r.upper_triangle!\n    #Need to account for upper trapezoidal structure if R is a tall rectangle (rows > columns)\n    else\n      r[0...columns, 0...columns].upper_triangle!\n      r[columns...rows, 0...columns] = 0\n    end\n\n    [q,r]\n  end\n\n  # Solve the matrix equation AX = B, where A is +self+, B is the first\n  # argument, and X is returned. A must be a nxn square matrix, while B must be\n  # nxm. Only works with dense matrices and non-integer, non-object data types.\n  #\n  # == Arguments\n  #\n  # * +b+ - the right hand side\n  #\n  # == Options\n  #\n  # * +form+ - Signifies the form of the matrix A in the linear system AX=B.\n  #   If not set then it defaults to +:general+, which uses an LU solver.\n  #   Other possible values are +:lower_tri+, +:upper_tri+ and +:pos_def+ (alternatively,\n  #   non-abbreviated symbols +:lower_triangular+, +:upper_triangular+,\n  #   and +:positive_definite+ can be used.\n  #   If +:lower_tri+ or +:upper_tri+ is set, then a specialized linear solver for linear\n  #   systems AX=B with a lower or upper triangular matrix A is used. 
If +:pos_def+ is chosen,\n  #   then the linear system is solved via the Cholesky factorization.\n  #   Note that when +:lower_tri+ or +:upper_tri+ is used, then the algorithm just assumes that\n  #   all entries in the lower/upper triangle of the matrix are zeros without checking (which\n  #   can be useful in certain applications).\n  #\n  #\n  # == Usage\n  #\n  #   a = NMatrix.new [2,2], [3,1,1,2], dtype: dtype\n  #   b = NMatrix.new [2,1], [9,8], dtype: dtype\n  #   a.solve(b)\n  #\n  #   # solve an upper triangular linear system more efficiently:\n  #   require 'benchmark'\n  #   require 'nmatrix/lapacke'\n  #   rand_mat = NMatrix.random([10000, 10000], dtype: :float64)\n  #   a = rand_mat.triu\n  #   b = NMatrix.random([10000, 10], dtype: :float64)\n  #   Benchmark.bm(10) do |bm|\n  #     bm.report('general') { a.solve(b) }\n  #     bm.report('upper_tri') { a.solve(b, form: :upper_tri) }\n  #   end\n  #   #                   user     system      total        real\n  #   #  general     73.170000   0.670000  73.840000 ( 73.810086)\n  #   #  upper_tri    0.180000   0.000000   0.180000 (  0.182491)\n  #\n  def solve(b, opts = {})\n    raise(ShapeError, \"Must be called on square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    raise(ShapeError, \"number of rows of b must equal number of cols of self\") if\n      self.shape[1] != b.shape[0]\n    raise(ArgumentError, \"only works with dense matrices\") if self.stype != :dense\n    raise(ArgumentError, \"only works for non-integer, non-object dtypes\") if\n      integer_dtype? or object_dtype? or b.integer_dtype? or b.object_dtype?\n\n    opts = { form: :general }.merge(opts)\n    x    = b.clone\n    n    = self.shape[0]\n    nrhs = b.shape[1]\n\n    case opts[:form]\n    when :general\n      clone = self.clone\n      ipiv = NMatrix::LAPACK.clapack_getrf(:row, n, n, clone, n)\n      # When we call clapack_getrs with :row, actually only the first matrix\n      # (i.e. 
clone) is interpreted as row-major, while the other matrix (x)\n      # is interpreted as column-major. See here: http://math-atlas.sourceforge.net/faq.html#RowSolve\n      # So we must transpose x before and after\n      # calling it.\n      x = x.transpose\n      NMatrix::LAPACK.clapack_getrs(:row, :no_transpose, n, nrhs, clone, n, ipiv, x, n)\n      x.transpose\n    when :upper_tri, :upper_triangular\n      raise(ArgumentError, \"upper triangular solver does not work with complex dtypes\") if\n        complex_dtype? or b.complex_dtype?\n      # this is the correct function call; see https://github.com/SciRuby/nmatrix/issues/374\n      NMatrix::BLAS::cblas_trsm(:row, :left, :upper, false, :nounit, n, nrhs, 1.0, self, n, x, nrhs)\n      x\n    when :lower_tri, :lower_triangular\n      raise(ArgumentError, \"lower triangular solver does not work with complex dtypes\") if\n        complex_dtype? or b.complex_dtype?\n      NMatrix::BLAS::cblas_trsm(:row, :left, :lower, false, :nounit, n, nrhs, 1.0, self, n, x, nrhs)\n      x\n    when :pos_def, :positive_definite\n      u, l = self.factorize_cholesky\n      z = l.solve(b, form: :lower_tri)\n      u.solve(z, form: :upper_tri)\n    else\n      raise(ArgumentError, \"#{opts[:form]} is not a valid form option\")\n    end\n  end\n\n  #\n  # call-seq:\n  #     least_squares(b) -> NMatrix\n  #     least_squares(b, tolerance: 10e-10) -> NMatrix\n  #\n  # Provides the linear least squares approximation of an under-determined system\n  # using QR factorization provided that the matrix is not rank-deficient.\n  #\n  # Only works for dense matrices.\n  #\n  # * *Arguments* :\n  #   - +b+ -> The solution column vector NMatrix of A * X = b.\n  #   - +tolerance:+ -> Absolute tolerance to check if a diagonal element in A = QR is near 0\n  #\n  # * *Returns* :\n  #   - NMatrix that is a column vector with the LLS solution\n  #\n  # * *Raises* :\n  #   - +ArgumentError+ -> least squares approximation only works for non-complex types\n 
 #   - +ShapeError+ -> system must be under-determined ( rows > columns )\n  #\n  # Examples :-\n  #\n  #   a = NMatrix.new([3,2], [2.0, 0, -1, 1, 0, 2])\n  #\n  #   b = NMatrix.new([3,1], [1.0, 0, -1])\n  #\n  #   a.least_squares(b)\n  #     =>[\n  #         [ 0.33333333333333326 ]\n  #         [ -0.3333333333333334 ]\n  #       ]\n  #\n  def least_squares(b, tolerance: 10e-6)\n    raise(ArgumentError, \"least squares approximation only works for non-complex types\") if\n      self.complex_dtype?\n\n    rows, columns = self.shape\n\n    raise(ShapeError, \"system must be under-determined ( rows > columns )\") unless\n      rows > columns\n\n    #Perform economical QR factorization\n    r = self.clone\n    tau = r.geqrf!\n    q_transpose_b = r.ormqr(tau, :left, :transpose, b)\n\n    #Obtain R from geqrf! intermediate\n    r[0...columns, 0...columns].upper_triangle!\n    r[columns...rows, 0...columns] = 0\n\n    diagonal = r.diagonal\n\n    raise(ArgumentError, \"rank deficient matrix\") if diagonal.any? { |x| x == 0 }\n\n    if diagonal.any? { |x| x.abs < tolerance }\n      warn \"warning: A diagonal element of R in A = QR is close to zero ;\" <<\n           \" indicates a possible loss of precision\"\n    end\n\n    # Transform the system A * X = B to R1 * X = B2 where B2 = Q1_t * B\n    r1 = r[0...columns, 0...columns]\n    b2 = q_transpose_b[0...columns]\n\n    nrhs = b2.shape[1]\n\n    #Solve the upper triangular system\n    NMatrix::BLAS::cblas_trsm(:row, :left, :upper, false, :nounit, r1.shape[0], nrhs, 1.0, r1, r1.shape[0], b2, nrhs)\n    b2\n  end\n\n  #\n  # call-seq:\n  #     gesvd! -> [u, sigma, v_transpose]\n  #     gesvd! -> [u, sigma, v_conjugate_transpose] # complex\n  #\n  # Compute the singular value decomposition of a matrix using LAPACK's GESVD function.\n  # This is destructive, modifying the source NMatrix.  
See also #gesdd.\n  #\n  # Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK\n  # requires.\n  #\n  def gesvd!(workspace_size=1)\n    NMatrix::LAPACK::gesvd(self, workspace_size)\n  end\n\n  #\n  # call-seq:\n  #     gesvd -> [u, sigma, v_transpose]\n  #     gesvd -> [u, sigma, v_conjugate_transpose] # complex\n  #\n  # Compute the singular value decomposition of a matrix using LAPACK's GESVD function.\n  #\n  # Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK\n  # requires.\n  #\n  def gesvd(workspace_size=1)\n    self.clone.gesvd!(workspace_size)\n  end\n\n\n\n  #\n  # call-seq:\n  #     gesdd! -> [u, sigma, v_transpose]\n  #     gesdd! -> [u, sigma, v_conjugate_transpose] # complex\n  #\n  # Compute the singular value decomposition of a matrix using LAPACK's GESDD function. This uses a divide-and-conquer\n  # strategy. This is destructive, modifying the source NMatrix.  See also #gesvd.\n  #\n  # Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK\n  # requires.\n  #\n  def gesdd!(workspace_size=nil)\n    NMatrix::LAPACK::gesdd(self, workspace_size)\n  end\n\n  #\n  # call-seq:\n  #     gesdd -> [u, sigma, v_transpose]\n  #     gesdd -> [u, sigma, v_conjugate_transpose] # complex\n  #\n  # Compute the singular value decomposition of a matrix using LAPACK's GESDD function. This uses a divide-and-conquer\n  # strategy. 
See also #gesvd.\n  #\n  # Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK\n  # requires.\n  #\n  def gesdd(workspace_size=nil)\n    self.clone.gesdd!(workspace_size)\n  end\n\n  #\n  # call-seq:\n  #     laswp!(ary) -> NMatrix\n  #\n  # In-place permute the columns of a dense matrix using LASWP according to the order given as an array +ary+.\n  #\n  # If +:convention+ is +:lapack+, then +ary+ represents a sequence of pair-wise permutations which are\n  # performed successively. That is, the i'th entry of +ary+ is the index of the column to swap\n  # the i'th column with, having already applied all earlier swaps.\n  #\n  # If +:convention+ is +:intuitive+, then +ary+ represents the order of columns after the permutation.\n  # That is, the i'th entry of +ary+ is the index of the column that will be in position i after the\n  # reordering (Matlab-like behaviour). This is the default.\n  #\n  # Not yet implemented for yale or list.\n  #\n  # == Arguments\n  #\n  # * +ary+ - An Array specifying the order of the columns. See above for details.\n  #\n  # == Options\n  #\n  # * +:convention+ - Possible values are +:lapack+ and +:intuitive+. Default is +:intuitive+. 
See above for details.\n  #\n  def laswp!(ary, opts={})\n    raise(StorageTypeError, \"ATLAS functions only work on dense matrices\") unless self.dense?\n    opts = { convention: :intuitive }.merge(opts)\n\n    if opts[:convention] == :intuitive\n      if ary.length != ary.uniq.length\n        raise(ArgumentError, \"No duplicated entries in the order array are allowed under convention :intuitive\")\n      end\n      n = self.shape[1]\n      p = []\n      order = (0...n).to_a\n      0.upto(n-2) do |i|\n        p[i] = order.index(ary[i])\n        order[i], order[p[i]] = order[p[i]], order[i]\n      end\n      p[n-1] = n-1\n    else\n      p = ary\n    end\n\n    NMatrix::LAPACK::laswp(self, p)\n  end\n\n  #\n  # call-seq:\n  #     laswp(ary) -> NMatrix\n  #\n  # Permute the columns of a dense matrix using LASWP according to the order given in an array +ary+.\n  #\n  # If +:convention+ is +:lapack+, then +ary+ represents a sequence of pair-wise permutations which are\n  # performed successively. That is, the i'th entry of +ary+ is the index of the column to swap\n  # the i'th column with, having already applied all earlier swaps. This is the default.\n  #\n  # If +:convention+ is +:intuitive+, then +ary+ represents the order of columns after the permutation.\n  # That is, the i'th entry of +ary+ is the index of the column that will be in position i after the\n  # reordering (Matlab-like behaviour).\n  #\n  # Not yet implemented for yale or list.\n  #\n  # == Arguments\n  #\n  # * +ary+ - An Array specifying the order of the columns. See above for details.\n  #\n  # == Options\n  #\n  # * +:convention+ - Possible values are +:lapack+ and +:intuitive+. Default is +:lapack+. See above for details.\n  #\n  def laswp(ary, opts={})\n    self.clone.laswp!(ary, opts)\n  end\n\n  #\n  # call-seq:\n  #     det -> determinant\n  #\n  # Calculate the determinant by way of LU decomposition. 
This is accomplished\n  # using clapack_getrf, and then by taking the product of the diagonal elements. There is a\n  # risk of underflow/overflow.\n  #\n  # There are probably also more efficient ways to calculate the determinant.\n  # This method requires making a copy of the matrix, since clapack_getrf\n  # modifies its input.\n  #\n  # For smaller matrices, you may be able to use +#det_exact+.\n  #\n  # This function is guaranteed to return the same type of data in the matrix\n  # upon which it is called.\n  #\n  # Integer matrices are converted to floating point matrices for the purposes of\n  # performing the calculation, as xGETRF can't work on integer matrices.\n  #\n  # * *Returns* :\n  #   - The determinant of the matrix. It's the same type as the matrix's dtype.\n  # * *Raises* :\n  #   - +ShapeError+ -> Must be used on square matrices.\n  #\n  def det\n    raise(ShapeError, \"determinant can be calculated only for square matrices\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n\n    # Cast to a dtype for which getrf is implemented\n    new_dtype = self.integer_dtype? ? :float64 : self.dtype\n    copy = self.cast(:dense, new_dtype)\n\n    # Need to know the number of permutations. We'll add up the diagonals of\n    # the factorized matrix.\n    pivot = copy.getrf!\n\n    num_perm = 0 #number of permutations\n    pivot.each_with_index do |swap, i|\n      #pivot indexes rows starting from 1, instead of 0, so need to subtract 1 here\n      num_perm += 1 if swap-1 != i\n    end\n    prod = num_perm % 2 == 1 ? -1 : 1 # odd permutations => negative\n    [shape[0],shape[1]].min.times do |i|\n      prod *= copy[i,i]\n    end\n\n    # Convert back to an integer if necessary\n    new_dtype != self.dtype ? prod.round : prod #prevent rounding errors\n  end\n\n  #\n  # call-seq:\n  #     complex_conjugate -> NMatrix\n  #     complex_conjugate(new_stype) -> NMatrix\n  #\n  # Get the complex conjugate of this matrix. See also complex_conjugate! 
for\n  # an in-place operation (provided the dtype is already +:complex64+ or\n  # +:complex128+).\n  #\n  # Doesn't work on list matrices, but you can optionally pass in the stype you\n  # want to cast to if you're dealing with a list matrix.\n  #\n  # * *Arguments* :\n  #   - +new_stype+ -> stype for the new matrix.\n  # * *Returns* :\n  #   - If the original NMatrix isn't complex, the result is a +:complex128+ NMatrix. Otherwise, it's the original dtype.\n  #\n  def complex_conjugate(new_stype = self.stype)\n    self.cast(new_stype, NMatrix::upcast(dtype, :complex64)).complex_conjugate!\n  end\n\n  #\n  # call-seq:\n  #     conjugate_transpose -> NMatrix\n  #\n  # Calculate the conjugate transpose of a matrix. If your dtype is already\n  # complex, this should only require one copy (for the transpose).\n  #\n  # * *Returns* :\n  #   - The conjugate transpose of the matrix as a copy.\n  #\n  def conjugate_transpose\n    self.transpose.complex_conjugate!\n  end\n\n  #\n  # call-seq:\n  #     absolute_sum -> Numeric\n  #\n  # == Arguments\n  #   - +incx+ -> the skip size (defaults to 1, no skip)\n  #   - +n+ -> the number of elements to include\n  #\n  # Return the sum of the contents of the vector. This is the BLAS asum routine.\n  def asum incx=1, n=nil\n    if self.shape == [1]\n      return self[0].abs unless self.complex_dtype?\n      return self[0].real.abs + self[0].imag.abs\n    end\n    return method_missing(:asum, incx, n) unless vector?\n    NMatrix::BLAS::asum(self, incx, self.size / incx)\n  end\n  alias :absolute_sum :asum\n\n  #\n  # call-seq:\n  #     norm2 -> Numeric\n  #\n  # == Arguments\n  #   - +incx+ -> the skip size (defaults to 1, no skip)\n  #   - +n+ -> the number of elements to include\n  #\n  # Return the 2-norm of the vector. 
This is the BLAS nrm2 routine.\n  def nrm2 incx=1, n=nil\n    return method_missing(:nrm2, incx, n) unless vector?\n    NMatrix::BLAS::nrm2(self, incx, self.size / incx)\n  end\n  alias :norm2 :nrm2\n\n  #\n  # call-seq:\n  #     scale! -> NMatrix\n  #\n  # == Arguments\n  #   - +alpha+ -> Scalar value used in the operation.\n  #   - +inc+ -> Increment used in the scaling function. Should generally be 1.\n  #   - +n+ -> Number of elements of +vector+.\n  #\n  # This is a destructive method, modifying the source NMatrix.  See also #scale.\n  # Return the scaling result of the matrix. BLAS scal will be invoked if provided.\n\n  def scale!(alpha, incx=1, n=nil)\n    raise(DataTypeError, \"Incompatible data type for the scaling factor\") unless\n        NMatrix::upcast(self.dtype, NMatrix::min_dtype(alpha)) == self.dtype\n    return NMatrix::BLAS::scal(alpha, self, incx, self.size / incx) if NMatrix::BLAS.method_defined? :scal\n    self.each_stored_with_indices do |e, *i|\n      self[*i] = e*alpha\n    end\n  end\n\n  #\n  # call-seq:\n  #     scale -> NMatrix\n  #\n  # == Arguments\n  #   - +alpha+ -> Scalar value used in the operation.\n  #   - +inc+ -> Increment used in the scaling function. Should generally be 1.\n  #   - +n+ -> Number of elements of +vector+.\n  #\n  # Return the scaling result of the matrix. BLAS scal will be invoked if provided.\n\n  def scale(alpha, incx=1, n=nil)\n    return self.clone.scale!(alpha, incx, n)\n  end\n\n  alias :permute_columns  :laswp\n  alias :permute_columns! :laswp!\n\nend\n"
  },
  {
    "path": "lib/nmatrix/enumerate.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == enumerate.rb\n#\n# Enumeration methods for NMatrix\n#++\n\nclass NMatrix\n  include Enumerable\n\n  ##\n  # call-seq:\n  #   each -> Enumerator\n  #\n  # Enumerate through the matrix. @see Enumerable#each\n  #\n  # For dense, this actually calls a specialized each iterator (in C). For yale and list, it relies upon\n  # #each_with_indices (which is about as fast as reasonably possible for C code).\n  def each &bl\n    if self.stype == :dense\n      self.__dense_each__(&bl)\n    elsif block_given?\n      self.each_with_indices(&bl)\n    else # Handle case where no block is given\n      Enumerator.new do |yielder|\n        self.each_with_indices do |params|\n          yielder.yield params\n        end\n      end\n    end\n  end\n\n  #\n  # call-seq:\n  #     flat_map -> Enumerator\n  #     flat_map { |elem| block } -> Array\n  #\n  # Maps using Enumerator (returns an Array or an Enumerator)\n  alias_method :flat_map, :map\n\n  ##\n  # call-seq:\n  #   map -> Enumerator\n  #   map { |elem| block } -> NMatrix\n  #\n  # Returns an NMatrix if a block is given. 
For an Array, use #flat_map\n  #\n  # Note that #map will always return an :object matrix, because it has no way of knowing\n  # how to handle operations on the different dtypes.\n  #\n  def map(&bl)\n    return enum_for(:map) unless block_given?\n    # NMatrix-jruby currently supports only doubles\n    cp  = jruby? ? self : self.cast(dtype: :object)\n    cp.map!(&bl)\n    cp\n  end\n\n  ##\n  # call-seq:\n  #   map! -> Enumerator\n  #   map! { |elem| block } -> NMatrix\n  #\n  # Maps in place.\n  # @see #map\n  #\n  def map!\n    return enum_for(:map!) unless block_given?\n    iterated = false\n    self.each_stored_with_indices do |e, *i|\n      iterated = true\n      self[*i] = (yield e)\n    end\n    #HACK: if there's a single element in a non-dense matrix, it won't iterate and\n    #won't change the default value; this ensures that it does get changed.\n    unless iterated then\n      self.each_with_indices do |e, *i|\n        self[*i] = (yield e)\n      end\n    end\n  end\n\n\n  #\n  # call-seq:\n  #     each_rank() -> NMatrix\n  #     each_rank() { |rank| block } -> NMatrix\n  #     each_rank(dimen) -> Enumerator\n  #     each_rank(dimen) { |rank| block } -> NMatrix\n  #\n  # Generic for @each_row, @each_col\n  #\n  # Iterate through each rank by reference.\n  #\n  # @param [Fixnum] dimen the rank being iterated over.\n  #\n  def each_rank(dimen=0, get_by=:reference)\n    return enum_for(:each_rank, dimen, get_by) unless block_given?\n    (0...self.shape[dimen]).each do |idx|\n      yield self.rank(dimen, idx, get_by)\n    end\n    self\n  end\n  alias :each_along_dim :each_rank\n\n  #\n  # call-seq:\n  #     each_row { |row| block } -> NMatrix\n  #\n  # Iterate through each row, referencing it as an NMatrix slice.\n  def each_row(get_by=:reference)\n    return enum_for(:each_row, get_by) unless block_given?\n    (0...self.shape[0]).each do |i|\n      yield self.row(i, get_by)\n    end\n    self\n  end\n\n  #\n  # call-seq:\n  #     each_column { |column| 
block } -> NMatrix\n  #\n  # Iterate through each column, referencing it as an NMatrix slice.\n  def each_column(get_by=:reference)\n    return enum_for(:each_column, get_by) unless block_given?\n    (0...self.shape[1]).each do |j|\n      yield self.column(j, get_by)\n    end\n    self\n  end\n\n  #\n  # call-seq:\n  #     each_layer -> { |column| block } -> ...\n  #\n  # Iterate through each layer, referencing it as an NMatrix slice.\n  #\n  # Note: If you have a 3-dimensional matrix, the first dimension contains rows,\n  # the second contains columns, and the third contains layers.\n  def each_layer(get_by=:reference)\n    return enum_for(:each_layer, get_by) unless block_given?\n    (0...self.shape[2]).each do |k|\n      yield self.layer(k, get_by)\n    end\n    self\n  end\n\n\n  #\n  # call-seq:\n  #     each_stored_with_index -> Enumerator\n  #\n  # Allow iteration across a vector NMatrix's stored values. See also @each_stored_with_indices\n  #\n  def each_stored_with_index(&block)\n    raise(NotImplementedError, \"only works for dim 2 vectors\") unless self.dim <= 2\n    return enum_for(:each_stored_with_index) unless block_given?\n\n    self.each_stored_with_indices do |v, i, j|\n      if shape[0] == 1\n        yield(v,j)\n      elsif shape[1] == 1\n        yield(v,i)\n      else\n        method_missing(:each_stored_with_index, &block)\n      end\n    end\n    self\n  end\n\n\n  ##\n  # call-seq:\n  #   inject_rank() -> Enumerator\n  #   inject_rank(dimen) -> Enumerator\n  #   inject_rank(dimen, initial) -> Enumerator\n  #   inject_rank(dimen, initial, dtype) -> Enumerator\n  #   inject_rank() { |elem| block } -> NMatrix\n  #   inject_rank(dimen) { |elem| block } -> NMatrix\n  #   inject_rank(dimen, initial) { |elem| block } -> NMatrix\n  #   inject_rank(dimen, initial, dtype) { |elem| block } -> NMatrix\n  #\n  # Reduces an NMatrix using a supplied block over a specified dimension.\n  # The block should behave the same way as for Enumerable#reduce.\n  #\n  
# @param [Integer] dimen the dimension being reduced\n  # @param [Numeric] initial the initial value for the reduction\n  #  (i.e. the usual parameter to Enumerable#reduce).  Supply nil or do not\n  #  supply this argument to have it follow the usual Enumerable#reduce\n  #  behavior of using the first element as the initial value.\n  # @param [Symbol] dtype if non-nil/false, forces the accumulated result to have this dtype\n  # @return [NMatrix] an NMatrix with the same number of dimensions as the\n  #  input, but with the input dimension now having size 1.  Each element\n  #  is the result of the reduction at that position along the specified\n  #  dimension.\n  #\n  def inject_rank(dimen=0, initial=nil, dtype=nil)\n\n    raise(RangeError, \"requested dimension (#{dimen}) does not exist (shape: #{shape})\") if dimen > self.dim\n\n    return enum_for(:inject_rank, dimen, initial, dtype) unless block_given?\n\n    new_shape = shape.dup\n    new_shape[dimen] = 1\n\n    first_as_acc = false\n\n    if initial then\n      acc = NMatrix.new(new_shape, initial, :dtype => dtype || self.dtype, stype: self.stype)\n    else\n      each_rank(dimen) do |sub_mat|\n        acc = (sub_mat.is_a?(NMatrix) and !dtype.nil? and dtype != self.dtype) ? sub_mat.cast(self.stype, dtype) : sub_mat\n        break\n      end\n      first_as_acc = true\n    end\n\n    each_rank(dimen) do |sub_mat|\n      if first_as_acc\n        first_as_acc = false\n        next\n      end\n      acc = yield(acc, sub_mat)\n    end\n\n    acc\n  end\n\n  alias :reduce_along_dim :inject_rank\n  alias :inject_along_dim :inject_rank\n\nend\n"
  },
  {
    "path": "lib/nmatrix/fftw.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == fftw.rb\n#\n# ruby file for the nmatrix-fftw gem. Loads the C extension and defines\n# nice ruby interfaces for FFTW functions.\n#++\n\nrequire 'nmatrix/nmatrix.rb'\nrequire \"nmatrix_fftw.so\"\n\nclass NMatrix\n\n  # Compute 1D FFT of the matrix using FFTW default parameters.\n  # @return [NMatrix] NMatrix of dtype :complex128 containing computed values.\n  # @example Compute 1D FFT of an NMatrix.\n  #   nm = NMatrix.new([10],\n  #     [\n  #       Complex(9.32,0), Complex(44,0), Complex(125,0), Complex(34,0),\n  #       Complex(31,0),   Complex(44,0), Complex(12,0),  Complex(1,0),\n  #       Complex(53.23,0),Complex(-23.23,0)\n  #     ], dtype: :complex128)\n  #   nm.fft\n  def fft\n    input = self.dtype == :complex128 ? self : self.cast(dtype: :complex128)\n    plan  = NMatrix::FFTW::Plan.new([self.size])\n    plan.set_input input\n    plan.execute\n    plan.output\n  end\n\n  # Compute 2D FFT of a 2D matrix using FFTW default parameters.\n  # @return [NMatrix] NMatrix of dtype :complex128 containing computed values.\n  def fft2\n    raise ShapeError, \"Shape must be 2 (is #{self.shape})\" if self.shape.size != 2\n    input = self.dtype == :complex128 ? 
self : self.cast(dtype: :complex128)\n    plan  = NMatrix::FFTW::Plan.new(self.shape, dim: 2)\n    plan.set_input input\n    plan.execute\n    plan.output\n  end\n\n  module FFTW\n    class Plan\n      # Hash which holds the numerical values of constants that determine\n      # the kind of transform that will be computed for a real input/real\n      # output instance. These are one-one mappings to the respective constants\n      # specified in FFTW. For example, for specifying the FFTW_R2HC constant\n      # as the 'kind', pass the symbol :r2hc.\n      #\n      # @see http://www.fftw.org/fftw3_doc/Real_002dto_002dReal-Transform-Kinds.html#Real_002dto_002dReal-Transform-Kinds\n      REAL_REAL_FFT_KINDS_HASH = {\n        r2hc:    0,\n        hc2r:    1,\n        dht:     2,\n        redft00: 3,\n        redft01: 4,\n        redft10: 5,\n        redft11: 6,\n        rodft00: 7,\n        rodft01: 9,\n        rodft10: 8,\n        rodft11: 10\n      }\n\n      # Hash holding the numerical values of the flags that are passed in the \n      # `flags` argument of a FFTW planner routine. Multiple flags can be passed\n      # to one instance of the planner. Their values are OR'd ('|') and then passed.\n      # For example, for passing the FFTW_ESTIMATE constant, use :estimate.\n      #\n      # nmatrix-fftw supports the following flags into the planning routine:\n      # * :estimate - Equivalent to FFTW_ESTIMATE. Specifies that, instead of \n      #   actual measurements of different algorithms, a simple heuristic is \n      #   used to pick a (probably sub-optimal) plan quickly. With this flag, \n      #   the input/output arrays are not overwritten during planning.\n      # * :measure - Equivalent to FFTW_MEASURE. Tells FFTW to find an optimized\n      #   plan by actually computing several FFTs and measuring their execution\n      #   time. 
Depending on your machine, this can take some time (often a few \n      #   seconds).\n      # * :patient - Equivalent to FFTW_PATIENT. Like FFTW_MEASURE, but considers\n      #   a wider range of algorithms and often produces a “more optimal” plan \n      #   (especially for large transforms), but at the expense of several times\n      #   longer planning time (especially for large transforms).\n      # * :exhaustive - Equivalent to FFTW_EXHAUSTIVE. Like FFTW_PATIENT, but \n      #   considers an even wider range of algorithms, including many that we \n      #   think are unlikely to be fast, to produce the most optimal plan but \n      #   with a substantially increased planning time.\n      #\n      # @see http://www.fftw.org/fftw3_doc/Planner-Flags.html#Planner-Flags\n      FLAG_VALUE_HASH = {\n        estimate: 64,\n        measure: 0,\n        exhaustive: 8,\n        patient: 32\n      }\n\n      # Hash holding numerical values of the direction in which a :complex_complex\n      # type FFT should be performed.\n      #\n      # @see http://www.fftw.org/fftw3_doc/Complex-One_002dDimensional-DFTs.html#Complex-One_002dDimensional-DFTs\n      # (The fourth argument, sign, can be either FFTW_FORWARD (-1) or \n      # FFTW_BACKWARD (+1), and indicates the direction of the transform you are\n      # interested in; technically, it is the sign of the exponent in the transform)\n      FFT_DIRECTION_HASH = {\n        forward: -1,\n        backward: 1\n      }\n\n      # Hash holding numerical equivalents of the DFT type. 
Used for determining\n      # DFT type in C level.\n      DATA_TYPE_HASH = {\n        complex_complex: 0,\n        real_complex:    1,\n        complex_real:    2,\n        real_real:       3\n      }\n\n      # Array holding valid options that can be passed into NMatrix::FFTW::Plan\n      # so that invalid options aren't passed.\n      VALID_OPTS = [:dim, :type, :direction, :flags, :real_real_kind]\n\n      # @!attribute [r] shape\n      #   @return [Array] Shape of the plan. Sequence of Fixnums.\n      attr_reader :shape\n\n      # @!attribute [r] size\n      #   @return [Numeric] Size of the plan.\n      attr_reader :size\n\n      # @!attribute [r] type\n      #   @return [Symbol] Type of the plan. Can be :complex_complex, \n      #   :complex_real, :real_complex or :real_real\n      attr_reader :type\n\n      # @!attribute [r] direction\n      #   @return [Symbol] Can be :forward or :backward. Indicates the direction\n      #   of the transform you are interested in; technically, it is the sign of\n      #   the exponent in the transform. Valid only for :complex_complex type.\n      attr_reader :direction\n\n      # @!attribute [r] flags\n      #   @return [Array<Symbol>] Can contain one or more symbols from\n      #   FLAG_VALUE_HASH. Determines how the planner is prepared.\n      #   @see FLAG_VALUE_HASH\n      attr_reader :flags\n\n      # @!attribute [r] dim\n      #   @return [Fixnum] Dimension of the FFT. Should be 1 for 1-D FFT, 2 for\n      #   2-D FFT and so on.\n      attr_reader :dim\n\n      # @!attribute [r] input\n      #   @return [NMatrix] Input NMatrix. Will be valid once the \n      #   NMatrix::FFTW::Plan#set_input method has been called.\n      attr_reader :input\n\n      # @!attribute [r] output\n      #   @return [NMatrix] Output NMatrix. 
Will be valid once the \n      #   NMatrix::FFTW::Plan#execute method has been called.\n      attr_reader :output\n\n      # @!attribute [r] real_real_kind\n      #   @return [Symbol] Specifies the kind of real to real FFT being performed.\n      #   This is a symbol from REAL_REAL_FFT_KINDS_HASH. Only valid when type\n      #   of transform is of type :real_real.\n      #   @see REAL_REAL_FFT_KINDS_HASH\n      #   @see http://www.fftw.org/fftw3_doc/Real_002dto_002dReal-Transform-Kinds.html#Real_002dto_002dReal-Transform-Kinds\n      attr_reader :real_real_kind\n\n      # Create a plan for a DFT. The FFTW library requires that you first create\n      # a plan for performing a DFT, so that FFTW can optimize its algorithms\n      # according to your computer's hardware and various user supplied options.\n      # \n      # @see http://www.fftw.org/doc/Using-Plans.html \n      #   For a comprehensive explanation of the FFTW planner.\n      # @param shape [Array, Fixnum] Specify the shape of the plan. For 1D\n      #   fourier transforms this can be a single number specifying the length of \n      #   the input. For multi-dimensional transforms, specify an Array containing\n      #   the length of each dimension.\n      # @param [Hash] opts the options to create a message with.\n      # @option opts [Fixnum] :dim (1) The number of dimensions of the Fourier\n      #   transform. If 'shape' has more numbers than :dim, the number of dimensions\n      #   specified by :dim will be considered when making the plan.\n      # @option opts [Symbol] :type (:complex_complex) The type of transform to\n      #   perform based on the input and output data desired. The default value\n      #   indicates that a transform is being planned that uses complex numbers\n      #   as input and generates complex numbers as output. 
Similarly you can\n      #   use :complex_real, :real_complex or :real_real to specify the kind\n      #   of input and output that you will be supplying to the plan.\n      #   @see DATA_TYPE_HASH\n      # @option opts [Symbol, Array] :flags (:estimate) Specify one or more flags\n      #   which denote the methodology that is used for deciding the algorithm used\n      #   when planning the fourier transform. Use one or more of :estimate, :measure,\n      #   :exhaustive and :patient. These flags map to the planner flags specified\n      #   at http://www.fftw.org/fftw3_doc/Planner-Flags.html#Planner-Flags.\n      #   @see REAL_REAL_FFT_KINDS_HASH\n      # @option opts [Symbol] :direction (:forward) The direction of a DFT of\n      #   type :complex_complex. Technically, it is the sign of the exponent in \n      #   the transform. :forward corresponds to -1 and :backward to +1.\n      #   @see FFT_DIRECTION_HASH\n      # @option opts [Array] :real_real_kind When the type of transform is :real_real,\n      #   specify the kind of transform that should be performed FOR EACH AXIS\n      #   of input. The position of the symbol in the Array corresponds to the \n      #   axis of the input. The number of elements in :real_real_kind must be equal to\n      #   :dim. 
Can accept one of the inputs specified in REAL_REAL_FFT_KINDS_HASH.\n      #   @see REAL_REAL_FFT_KINDS_HASH\n      #   @see http://www.fftw.org/fftw3_doc/Real_002dto_002dReal-Transform-Kinds.html#Real_002dto_002dReal-Transform-Kinds\n      # @example Create a plan for a basic 1D FFT and execute it.\n      #   input = NMatrix.new([10],\n      #     [\n      #       Complex(9.32,0), Complex(44,0), Complex(125,0), Complex(34,0),\n      #       Complex(31,0),   Complex(44,0), Complex(12,0),  Complex(1,0),\n      #       Complex(53.23,0),Complex(-23.23,0),\n      #     ], dtype: :complex128)\n      #   plan = NMatrix::FFTW::Plan.new(10)\n      #   plan.set_input input\n      #   plan.execute\n      #   print plan.output\n      def initialize shape, opts={}\n        verify_opts opts\n        opts = {\n          dim: 1,\n          flags: :estimate,\n          direction: :forward,\n          type: :complex_complex\n        }.merge(opts)\n\n        @type      = opts[:type]\n        @dim       = opts[:dim]\n        @direction = opts[:direction]\n        @shape     = shape.is_a?(Array) ? shape : [shape]\n        @size      = @shape[0...@dim].inject(:*)\n        @flags     = opts[:flags].is_a?(Array) ? opts[:flags] : [opts[:flags]]\n        @real_real_kind    = opts[:real_real_kind]\n\n        raise ArgumentError, \":real_real_kind option must be specified for :real_real type transforms\" if\n          @real_real_kind.nil? 
and @type == :real_real\n\n        raise ArgumentError, \"Specify kind of transform of each axis of input.\" if\n          @real_real_kind and @real_real_kind.size != @dim\n\n        raise ArgumentError, \"dim (#{@dim}) cannot be more than size of shape #{@shape.size}\" if\n          @dim > @shape.size\n\n        @plan_data = c_create_plan(@shape, @size, @dim, \n          combine_flags(@flags), FFT_DIRECTION_HASH[@direction], \n          DATA_TYPE_HASH[@type], encoded_rr_kind)\n      end\n\n      # Set input for the planned DFT.\n      # @param [NMatrix] ip An NMatrix specifying the input to the FFT routine.\n      #   The data type of the NMatrix must be either :complex128 or :float64\n      #   depending on the type of FFT that has been planned. Size must be same\n      #   as the size of the planned routine.\n      # @raise [ArgumentError] if the input has any storage apart from :dense\n      #   or if size/data type of the planned transform and the input matrix\n      #   don't match.\n      def set_input ip\n        raise ArgumentError, \"stype must be dense.\" if ip.stype != :dense\n        raise ArgumentError, \"size of input (#{ip.size}) cannot be greater than planned input size #{@size}\" if\n          ip.size != @size\n        \n        case @type\n        when :complex_complex, :complex_real\n          raise ArgumentError, \"dtype must be complex128.\" if ip.dtype != :complex128\n        when :real_complex, :real_real\n          raise ArgumentError, \"dtype must be float64.\" if ip.dtype != :float64\n        else\n          raise \"Invalid type #{@type}\"\n        end\n\n        @input = ip\n        c_set_input(ip, @plan_data, DATA_TYPE_HASH[@type])\n      end\n\n      # Execute the DFT with the set plan.\n      # @return [TrueClass] If all goes well and the fourier transform has been\n      #   successfully computed, 'true' will be returned and you can access the\n      #   computed output from the NMatrix::FFTW::Plan#output accessor.\n      def 
execute\n        @output = \n        case @type\n        when :complex_complex\n          @input.clone_structure        \n        when :real_complex\n          NMatrix.new([@input.size/2 + 1], dtype: :complex128)\n        when :complex_real, :real_real\n          NMatrix.new([@input.size], dtype: :float64)\n        else\n          raise TypeError, \"Invalid type #{@type}\"\n        end\n\n        c_execute(@output, @plan_data, DATA_TYPE_HASH[@type])\n      end\n     private\n\n      # Combine flags received from the user (Symbols) into their respective\n      # numeric equivalents and then 'OR' (|) all of them so the resulting number\n      # can be passed directly to the FFTW planner function.\n      def combine_flags flgs\n        temp = 0\n        flgs.each do |f|\n          temp |= FLAG_VALUE_HASH[f]\n        end\n        temp\n      end\n\n      # Verify options passed into the constructor to make sure that no invalid\n      # options have been passed.\n      def verify_opts opts\n        unless (opts.keys - VALID_OPTS).empty?\n          raise ArgumentError, \"#{opts.keys - VALID_OPTS} are invalid opts.\"\n        end\n      end\n\n      # Get the numerical equivalents of the kind of real-real FFT to be computed.\n      def encoded_rr_kind\n        return @real_real_kind.map { |e| REAL_REAL_FFT_KINDS_HASH[e] } if @real_real_kind\n      end\n    end\n  end\nend"
  },
  {
    "path": "lib/nmatrix/homogeneous.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == homogeneous.rb\n#\n# This file contains optional shortcuts for generating homogeneous\n# transformations.\n#\n#++\n\nclass NMatrix\n  class << self\n    #\n    # call-seq:\n    #     x_rotation(angle_in_radians) -> NMatrix\n    #     x_rotation(angle_in_radians, dtype: dtype) -> NMatrix\n    #     y_rotation(angle_in_radians) -> NMatrix\n    #     y_rotation(angle_in_radians, dtype: dtype) -> NMatrix\n    #     z_rotation(angle_in_radians) -> NMatrix\n    #     z_rotation(angle_in_radians, dtype: dtype) -> NMatrix\n    #\n    # Generate a 4x4 homogeneous transformation matrix representing a rotation\n    # about the x, y, or z axis respectively.\n    #\n    # * *Arguments* :\n    #   - +angle_in_radians+ -> The angle of rotation in radians.\n    #   - +dtype+ -> (optional) Default is +:float64+\n    # * *Returns* :\n    #   - A homogeneous transformation matrix consisting of a single rotation.\n    #\n    # Examples:\n    #\n    #    NMatrix.x_rotation(Math::PI.quo(6)) # =>\n    #                                              1.0      0.0       0.0       0.0\n    #                                              0.0      0.866025 -0.499999  0.0\n    #                                              0.0      0.499999  0.866025  0.0\n    #                                              0.0     
 0.0       0.0       1.0\n    #\n    #\n    #    NMatrix.x_rotation(Math::PI.quo(6), dtype: :float32) # =>\n    #                                              1.0      0.0       0.0       0.0\n    #                                              0.0      0.866025 -0.5       0.0\n    #                                              0.0      0.5       0.866025  0.0\n    #                                              0.0      0.0       0.0       1.0\n    #\n    def x_rotation angle_in_radians, opts={}\n      c = Math.cos(angle_in_radians)\n      s = Math.sin(angle_in_radians)\n      NMatrix.new(4, [1.0, 0.0, 0.0, 0.0,\n                      0.0, c,   -s,  0.0,\n                      0.0, s,    c,  0.0,\n                      0.0, 0.0, 0.0, 1.0], {dtype: :float64}.merge(opts))\n    end\n\n    def y_rotation angle_in_radians, opts={}\n      c = Math.cos(angle_in_radians)\n      s = Math.sin(angle_in_radians)\n      NMatrix.new(4, [ c,  0.0,  s,  0.0,\n                      0.0, 1.0, 0.0, 0.0,\n                      -s,  0.0,  c,  0.0,\n                      0.0, 0.0, 0.0, 1.0], {dtype: :float64}.merge(opts))\n    end\n\n    def z_rotation angle_in_radians, opts={}\n      c = Math.cos(angle_in_radians)\n      s = Math.sin(angle_in_radians)\n      NMatrix.new(4, [ c,  -s,  0.0, 0.0,\n                       s,   c,  0.0, 0.0,\n                      0.0, 0.0, 1.0, 0.0,\n                      0.0, 0.0, 0.0, 1.0], {dtype: :float64}.merge(opts))\n    end\n\n\n    #\n    # call-seq:\n    #     translation(x, y, z) -> NMatrix\n    #     translation([x,y,z]) -> NMatrix\n    #     translation(translation_matrix) -> NMatrix\n    #     translation(translation, dtype: dtype) -> NMatrix\n    #     translation(x, y, z, dtype: dtype) -> NMatrix\n    #\n    # Generate a 4x4 homogeneous transformation matrix representing a translation.\n    #\n    # * *Returns* :\n    #   - A homogeneous transformation matrix consisting of a translation.\n 
   #\n    # Examples:\n    #\n    #    NMatrix.translation(4.0,5.0,6.0) # =>\n    #                                          1.0   0.0   0.0   4.0\n    #                                          0.0   1.0   0.0   5.0\n    #                                          0.0   0.0   1.0   6.0\n    #                                          0.0   0.0   0.0   1.0\n    #\n    #    NMatrix.translation(4.0,5.0,6.0, dtype: :int64) # =>\n    #                                                         1  0  0  4\n    #                                                         0  1  0  5\n    #                                                         0  0  1  6\n    #                                                         0  0  0  1\n    #    NMatrix.translation(4,5,6) # =>\n    #                                     1  0  0  4\n    #                                     0  1  0  5\n    #                                     0  0  1  6\n    #                                     0  0  0  1\n    #\n    def translation *args\n      xyz = args.shift if args.first.is_a?(NMatrix) || args.first.is_a?(Array)\n      default_dtype = xyz.respond_to?(:dtype) ? 
xyz.dtype : NMatrix.guess_dtype(xyz)\n      opts = {dtype: default_dtype}\n      opts = opts.merge(args.pop) if args.size > 0 && args.last.is_a?(Hash)\n      xyz ||= args\n\n      n = if args.size > 0\n        NMatrix.eye(4, opts)\n      else\n        NMatrix.eye(4, opts)\n      end\n      n[0..2,3] = xyz\n      n\n    end\n  end\n\n  #\n  # call-seq:\n  #     quaternion -> NMatrix\n  #\n  # Find the quaternion for a 3D rotation matrix.\n  #\n  # Code borrowed from: http://courses.cms.caltech.edu/cs171/quatut.pdf\n  #\n  # * *Returns* :\n  #   - A length-4 NMatrix representing the corresponding quaternion.\n  #\n  # Examples:\n  #\n  #    n.quaternion # => [1, 0, 0, 0]\n  #\n  def quaternion\n    raise(ShapeError, \"Expected square matrix\") if self.shape[0] != self.shape[1]\n    raise(ShapeError, \"Expected 3x3 rotation (or 4x4 homogeneous) matrix\") if self.shape[0] > 4 || self.shape[0] < 3\n\n    q = NMatrix.new([4], dtype: self.dtype == :float32 ? :float32: :float64)\n    rotation_trace = self[0,0] + self[1,1] + self[2,2]\n    if rotation_trace >= 0\n      self_w = self.shape[0] == 4 ? self[3,3] : 1.0\n      root_of_homogeneous_trace = Math.sqrt(rotation_trace + self_w)\n      q[0] = root_of_homogeneous_trace * 0.5\n      s = 0.5 / root_of_homogeneous_trace\n      q[1] = (self[2,1] - self[1,2]) * s\n      q[2] = (self[0,2] - self[2,0]) * s\n      q[3] = (self[1,0] - self[0,1]) * s\n    else\n      h = 0\n      h = 1 if self[1,1] > self[0,0]\n      h = 2 if self[2,2] > self[h,h]\n\n      case_macro = Proc.new do |i,j,k,ii,jj,kk|\n        qq = NMatrix.new([4], dtype: :float64)\n        self_w = self.shape[0] == 4 ? 
self[3,3] : 1.0\n        s = Math.sqrt( (self[ii,ii] - (self[jj,jj] + self[kk,kk])) + self_w)\n        qq[i] = s*0.5\n        s = 0.5 / s\n        qq[j] = (self[ii,jj] + self[jj,ii]) * s\n        qq[k] = (self[kk,ii] + self[ii,kk]) * s\n        qq[0] = (self[kk,jj] - self[jj,kk]) * s\n        qq\n      end\n\n      case h\n      when 0\n        q = case_macro.call(1,2,3, 0,1,2)\n      when 1\n        q = case_macro.call(2,3,1, 1,2,0)\n      when 2\n        q = case_macro.call(3,1,2, 2,0,1)\n      end\n\n      self_w = self.shape[0] == 4 ? self[3,3] : 1.0\n      if self_w != 1\n        s = 1.0 / Math.sqrt(self_w)\n        q[0] *= s\n        q[1] *= s\n        q[2] *= s\n        q[3] *= s\n      end\n    end\n\n    q\n  end\n\n  #\n  # call-seq:\n  #     angle_vector -> [angle, about_vector]\n  #\n  # Find the angle vector for a quaternion. Assumes the quaternion has unit length.\n  #\n  # Source: http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToAngle/\n  #\n  # * *Returns* :\n  #   - An angle (in radians) describing the rotation about the +about_vector+.\n  #   - A length-3 NMatrix representing the corresponding quaternion.\n  #\n  # Examples:\n  #\n  #    q.angle_vector # => [1, 0, 0, 0]\n  #\n  def angle_vector\n    raise(ShapeError, \"Expected length-4 vector or matrix (quaternion)\") if self.shape[0] != 4\n    raise(\"Expected unit quaternion\") if self[0] > 1\n\n    xyz = NMatrix.new([3], dtype: self.dtype)\n\n    angle = 2 * Math.acos(self[0])\n    s = Math.sqrt(1.0 - self[0]*self[0])\n\n    xyz[0..2] = self[1..3]\n    xyz /= s if s >= 0.001 # avoid divide by zero\n    return [angle, xyz]\n  end\nend"
  },
  {
    "path": "lib/nmatrix/io/fortran_format.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io/fortran_format.rb\n#\n# A parser for making sense of FORTRAN formats.\n# => Only handles I (integer), F (float) and E (exponential) format codes. \n#++\n\nclass NMatrix\n  module IO\n    module FortranFormat\n\n      # Class for reading strings in FORTRAN format for specifying attributes\n      # of numerical data in a file. 
Supports F (float), E (exponential) and \n      # I (integer).\n      # \n      # == Usage\n      # \n      #   p = NMatrix::IO::FortranFormat::Reader.new(\"(16I5)\")\n      #   v = p.parse\n      #   puts v #=> { :format_code => \"INT_ID\", \n      #          #=>   :repeat      =>       16,\n      #          #=>   :field_width =>        5 }\n      class Reader\n\n        # Accepts a string in FORTRAN format and initializes the \n        # NMatrix::IO::FortranFormat::Reader object for further parsing of the \n        # data.\n        # \n        # == Arguments\n        # \n        # * +string+ - FORTRAN format string to be parsed.\n        def initialize string\n          @string = string\n        end\n\n        # Parses the FORTRAN format string passed in initialize and returns\n        # a hash of the results.\n        # \n        # == Result Hash Format\n        # \n        # Take note that some of the below parameters may be absent in the hash\n        # depending on the type of string being parsed.\n        # \n        # * +:format_code+ - A string containing the format code of the read data. \n        #                    Can be \"INT_ID\", \"FP_ID\" or \"EXP_ID\" \n        # * +:repeat+      - Number of times this format will repeat in a line.\n        # * +:field_width+ - Width of the numerical part of the number.\n        # * +:post_decimal_width+ - Width of the numerals after the decimal point.\n        # * +:exponent_width+ - Width of exponent part of the number.\n        def parse\n          raise(IOError, \"Left or right parentheses missing\") \\\n           if parentheses_missing? # change tests to handle 'raise' not return\n\n          @result = {}\n          @string = @string[1..-2]\n\n          if valid_fortran_format?\n            load_result\n          else\n            raise(IOError, \"Invalid FORTRAN format specified. 
Only Integer, Float or Exponential acceptable.\")\n          end\n\n          @result\n        end\n\n       private\n        def parentheses_missing?\n          true if @string[0] != '(' or @string[-1] != ')'\n        end\n\n        # Changing any of the following regular expressions can lead to disaster\n        def valid_fortran_format?\n          @mdata = @string.match(/\\A(\\d*)(I)(\\d+)\\z/) # check for integer format\n          @mdata = @string.match(/\\A(\\d*)(F)(\\d+)\\.(\\d+)\\z/) \\\n           if @mdata.nil? # check for floating point if not integer\n          @mdata =  @string.match(/\\A(\\d*)(E)(\\d+)\\.(\\d+)(E)?(\\d*)\\z/) \\\n           if @mdata.nil? # check for exponential format if not floating point\n\n          @mdata\n        end\n\n        def load_result\n          if @mdata.to_a.include? \"I\"\n            create_integer_hash\n          elsif @mdata.to_a.include? \"F\"\n            create_float_hash\n          else\n            create_exp_hash\n          end\n        end\n\n        def create_integer_hash\n          @result[:format_code] = \"INT_ID\"\n          @result[:repeat]      = @mdata[1].to_i if !@mdata[1].empty?\n          @result[:field_width] = @mdata[3].to_i\n        end\n\n        def create_float_hash\n          @result[:format_code]        = \"FP_ID\"\n          @result[:repeat]             = @mdata[1].to_i if !@mdata[1].empty?\n          @result[:field_width]        = @mdata[3].to_i\n          @result[:post_decimal_width] = @mdata[4].to_i\n        end\n\n        def create_exp_hash\n          @result[:format_code]        = \"EXP_ID\"\n          @result[:repeat]             = @mdata[1].to_i if !@mdata[1].empty?\n          @result[:field_width]        = @mdata[3].to_i\n          @result[:post_decimal_width] = @mdata[4].to_i\n          @result[:exponent_width]     = @mdata[6].to_i if !@mdata[6].empty?\n        end\n      end\n      \n    end\n  end\nend"
  },
  {
    "path": "lib/nmatrix/io/harwell_boeing.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io/matlab/harwell_boeing.rb\n#\n# Harwell Boeing file reader (and eventually writer too).\n# => Supports only assembled, non-symmetric, real matrices\n# => Data types supported are exponential, floating point and integer\n# => Returned NMatrix is of type :float64\n#++\n\nrequire_relative './fortran_format.rb'\n\nclass NMatrix\n  module IO\n    module HarwellBoeing\n\n      class << self\n        # Loads the contents of a valid Harwell Boeing format file and \n        # returns an NMatrix object with the values of the file and optionally\n        # only the header info.\n        # \n        # Supports only assembled, non-symmetric, real matrices. 
File name must\n        # have matrix type as extension.\n        # \n        # Example - test_file.rua\n        # \n        # == Arguments\n        # \n        # * +file_path+ - Path of the Harwell Boeing file  to load.\n        # * +opts+      - Options for specifying whether you want\n        #                 the values and  header or only the header.\n        # \n        # == Options\n        # \n        # * +:header+ - If specified as *true*, will return only the header of\n        #               the HB file. Will return the NMatrix object and\n        #               header as an array if left blank.\n        # \n        # == Usage\n        # \n        #   mat, head = NMatrix::IO::HarwellBoeing.load(\"test_file.rua\")\n        # \n        #   head = NMatrix::IO::HarwellBoeing.load(\"test_file.rua\", {header: true})\n        # \n        # == Alternate Usage\n        # \n        # You can specify the file using NMatrix::IO::Reader.new(\"path/to/file\")\n        # and then call *header* or *values* on the resulting object.\n        def load file_path, opts={}\n          hb_obj = NMatrix::IO::HarwellBoeing::Reader.new(file_path)\n\n          return hb_obj.header if opts[:header]\n\n          [hb_obj.values, hb_obj.header]\n        end\n      end\n\n      class Reader\n        def initialize file_name\n          raise(IOError, \"Unsupported file format. 
Specify file as \\\n            file_name.rua.\") if !file_name.match(/.*\\.[rR][uU][aA]/)\n\n          @file_name   = file_name\n          @header      = {}\n          @body        = nil\n        end\n\n        def header\n          return @header if !@header.empty?\n          @file = File.open @file_name, \"r\"\n\n          line = @file.gets\n\n          @header[:title] = line[0...72].strip\n          @header[:key]   = line[72...80].strip\n\n          line = @file.gets\n\n          @header[:totcrd] = line[0...14] .strip.to_i\n          @header[:ptrcrd] = line[14...28].strip.to_i\n          @header[:indcrd] = line[28...42].strip.to_i\n          @header[:valcrd] = line[42...56].strip.to_i\n          @header[:rhscrd] = line[56...70].strip.to_i\n\n          raise(IOError, \"Right hand sides not supported.\") \\\n           if @header[:rhscrd] > 0\n\n          line = @file.gets\n\n          @header[:mxtype] = line[0...3]\n\n          raise(IOError, \"Currently supports only real, assembled, unsymmetric \\\n            matrices.\") if !@header[:mxtype].match(/RUA/)\n\n          @header[:nrow]   = line[13...28].strip.to_i\n          @header[:ncol]   = line[28...42].strip.to_i\n          @header[:nnzero] = line[42...56].strip.to_i\n          @header[:neltvl] = line[56...70].strip.to_i\n\n          line = @file.gets\n\n          fortran_reader = NMatrix::IO::FortranFormat::Reader\n\n          @header[:ptrfmt] = fortran_reader.new(line[0...16].strip) .parse\n          @header[:indfmt] = fortran_reader.new(line[16...32].strip).parse\n          @header[:valfmt] = fortran_reader.new(line[32...52].strip).parse\n          @header[:rhsfmt] = fortran_reader.new(line[52...72].strip).parse\n\n          @header\n        end\n\n        def values\n          @header      = header if @header.empty?\n          @file.lineno = 5      if @file.lineno != 5\n          @matrix      = NMatrix.new([ @header[:nrow], @header[:ncol] ], \n                                      0, dtype: 
:float64)\n\n          read_column_pointers\n          read_row_indices\n          read_values\n\n          @file.close\n          \n          assemble_matrix\n\n          @matrix\n        end\n\n       private\n\n        def read_column_pointers\n          @col_ptrs  = []\n          pointer_lines     = @header[:ptrcrd]\n          pointers_per_line = @header[:ptrfmt][:repeat]\n          pointer_width     = @header[:ptrfmt][:field_width]\n\n          @col_ptrs = read_numbers :to_i, pointer_lines, pointers_per_line, \n                                             pointer_width\n\n          @col_ptrs.map! {|c| c -= 1}\n        end\n\n        def read_row_indices\n          @row_indices     = []\n          row_lines        = @header[:indcrd]\n          indices_per_line = @header[:indfmt][:repeat]\n          row_width        = @header[:indfmt][:field_width]\n\n          @row_indices = read_numbers :to_i, row_lines, indices_per_line, \n                                      row_width\n\n          @row_indices.map! 
{|r| r -= 1}\n        end\n\n        def read_values\n          @vals = []\n          value_lines = @header[:valcrd]\n          values_per_line = @header[:valfmt][:repeat]\n          value_width    = @header[:valfmt][:field_width]\n\n          @vals = read_numbers :to_f, value_lines, values_per_line, \n                                  value_width\n        end\n\n        def read_numbers to_dtype, num_of_lines, numbers_per_line, number_width\n          data = []\n\n          num_of_lines.times do \n            line  = @file.gets\n            index = 0\n\n            numbers_per_line.times do\n              delimiter = index + number_width\n\n              data << line[index...delimiter].strip.send(to_dtype)\n\n              break if line.length <= delimiter\n              index += number_width\n            end\n          end\n\n          data\n        end\n\n        def assemble_matrix\n          col = 0\n          @col_ptrs[0..-2].each_index do |index|\n            @col_ptrs[index].upto(@col_ptrs[index+1] - 1) do |row_ptr|\n              row               = @row_indices[row_ptr]\n              @matrix[row, col] = @vals[row_ptr]\n            end\n\n            col += 1\n          end\n        end\n      end\n\n    end\n  end\nend"
  },
  {
    "path": "lib/nmatrix/io/market.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io/market.rb\n#\n# MatrixMarket reader and writer.\n#\n#++\n\n# Matrix Market is a repository of test data for use in studies of algorithms\n# for numerical linear algebra. There are 3 file formats used:\n#\n# - Matrix Market Exchange Format.\n# - Harwell-Boeing Exchange Format.\n# - Coordinate Text File Format. (to be phased out)\n#\n# This module can load and save the first format. We might support\n# Harwell-Boeing in the future.\n#\n# The MatrixMarket format is documented in:\n# * http://math.nist.gov/MatrixMarket/formats.html\nmodule NMatrix::IO::Market\n  CONVERTER_AND_DTYPE = {\n    :real => [:to_f, :float64],\n    :complex => [:to_c, :complex128],\n    :integer => [:to_i, :int64],\n    :pattern => [:to_i, :byte]\n  } #:nodoc:\n\n  ENTRY_TYPE = {\n    :byte => :integer, :int8 => :integer, :int16 => :integer,\n    :int32 => :integer, :int64 => :integer,:float32 => :real,\n    :float64 => :real, :complex64 => :complex, :complex128 => :complex\n  } #:nodoc:\n\n  class << self\n\n    # call-seq:\n    #     load(filename) -> NMatrix\n    #\n    # Load a MatrixMarket file. 
Requires a +filename+ as an argument.\n    #\n    # * *Arguments* :\n    #   - +filename+ -> String with the filename to be saved.\n    # * *Raises* :\n    #   - +IOError+ -> expected type code line beginning with '%%MatrixMarket matrix'\n    def load(filename)\n\n      f = File.new(filename, \"r\")\n\n      header = f.gets\n      header.chomp!\n      raise(IOError, \"expected type code line beginning with '%%MatrixMarket matrix'\") \\\n       if header !~ /^\\%\\%MatrixMarket\\ matrix/\n\n      header = header.split\n\n      entry_type = header[3].downcase.to_sym\n      symmetry   = header[4].downcase.to_sym\n      converter, default_dtype = CONVERTER_AND_DTYPE[entry_type]\n\n      if header[2] == 'coordinate'\n        load_coordinate f, converter, default_dtype, entry_type, symmetry\n      else\n        load_array f, converter, default_dtype, entry_type, symmetry\n      end\n    end\n\n    # call-seq:\n    #     save(matrix, filename, options = {}) -> true\n    #\n    # Can optionally set :symmetry to :general, :symmetric, :hermitian; and can\n    # set :pattern => true if you're writing a sparse matrix and don't want\n    # values stored.\n    #\n    # * *Arguments* :\n    #   - +matrix+ -> NMatrix with the data to be saved.\n    #   - +filename+ -> String with the filename to be saved.\n    # * *Raises* :\n    #   - +DataTypeError+ -> MatrixMarket does not support Ruby objects.\n    #   - +ArgumentError+ -> Expected two-dimensional NMatrix.\n    def save(matrix, filename, options = {})\n      options = {:pattern => false,\n        :symmetry => :general}.merge(options)\n\n      mode = matrix.stype == :dense ? :array : :coordinate\n      if [:object].include?(matrix.dtype)\n        raise(DataTypeError, \"MatrixMarket does not support Ruby objects\")\n      end\n      entry_type = options[:pattern] ? 
:pattern : ENTRY_TYPE[matrix.dtype]\n\n      raise(ArgumentError, \"expected two-dimensional NMatrix\") \\\n       if matrix.dim != 2\n\n      f = File.new(filename, 'w')\n\n      f.puts \"%%MatrixMarket matrix #{mode} #{entry_type} #{options[:symmetry]}\"\n\n      if matrix.stype == :dense\n        save_array matrix, f, options[:symmetry]\n      elsif [:list,:yale].include?(matrix.stype)\n        save_coordinate matrix, f, options[:symmetry], options[:pattern]\n      end\n\n      f.close\n\n      true\n    end\n\n\n    protected\n\n    def save_coordinate matrix, file, symmetry, pattern\n      # Convert to a hash in order to store\n      rows = matrix.to_h\n\n      # Count non-zeros\n      count = 0\n      rows.each_pair do |i, columns|\n        columns.each_pair do |j, val|\n          next if symmetry != :general && j > i\n          count += 1\n        end\n      end\n\n      # Print dimensions and non-zeros\n      file.puts \"#{matrix.shape[0]}\\t#{matrix.shape[1]}\\t#{count}\"\n\n      # Print coordinates\n      rows.each_pair do |i, columns|\n        columns.each_pair do |j, val|\n          next if symmetry != :general && j > i\n          file.puts(pattern ? 
\"\\t#{i+1}\\t#{j+1}\" : \"\\t#{i+1}\\t#{j+1}\\t#{val}\")\n        end\n      end\n\n      file\n    end\n\n\n    def save_array matrix, file, symmetry\n      file.puts [matrix.shape[0], matrix.shape[1]].join(\"\\t\")\n\n      if symmetry == :general\n        (0...matrix.shape[1]).each do |j|\n          (0...matrix.shape[0]).each do |i|\n            file.puts matrix[i,j]\n          end\n        end\n      else # :symmetric, :'skew-symmetric', :hermitian\n        (0...matrix.shape[1]).each do |j|\n          (j...matrix.shape[0]).each do |i|\n            file.puts matrix[i,j]\n          end\n        end\n      end\n\n      file\n    end\n\n\n    def load_array file, converter, dtype, entry_type, symmetry\n      mat = nil\n\n      line = file.gets\n      line.chomp!\n      line.lstrip!\n\n      fields = line.split\n\n      mat = NMatrix.new :dense, [fields[0].to_i, fields[1].to_i], dtype\n\n      (0...mat.shape[1]).each do |j|\n        (0...mat.shape[0]).each do |i|\n          datum = file.gets.chomp.send(converter)\n          mat[i,j] = datum\n\n          unless i == j || symmetry == :general\n            if symmetry == :symmetric\n              mat[j,i] = datum\n            elsif symmetry == :hermitian\n              mat[j,i] = Complex.new(datum.real, -datum.imag)\n            elsif symmetry == :'skew-symmetric'\n              mat[j,i] = -datum\n            end\n          end\n        end\n      end\n\n      file.close\n\n      mat\n    end\n\n\n    # Creates a :list NMatrix from a coordinate-list MatrixMarket file.\n    def load_coordinate file, converter, dtype, entry_type, symmetry\n\n      mat = nil\n\n      # Read until we get the dimensions and nonzeros\n      while line = file.gets\n        line.chomp!\n        line.lstrip!\n        line, comment = line.split('%', 2) # ignore comments\n        if line.size > 4\n          shape0, shape1 = line.split\n          mat = NMatrix.new(:list, [shape0.to_i, shape1.to_i], 0, dtype)\n          break\n        end\n      
end\n\n      # Now read the coordinates\n      while line = file.gets\n        line.chomp!\n        line.lstrip!\n        line, comment = line.split('%', 2) # ignore comments\n\n        next unless line.size >= 5 # ignore empty lines\n\n        fields = line.split\n\n        i = fields[0].to_i - 1\n        j = fields[1].to_i - 1\n        datum = entry_type == :pattern ? 1 : fields[2].send(converter)\n\n        mat[i, j] = datum # add to the matrix\n        unless i == j || symmetry == :general\n          if symmetry == :symmetric\n            mat[j, i] = datum\n          elsif symmetry == :'skew-symmetric'\n            mat[j, i] = -datum\n          elsif symmetry == :hermitian\n            mat[j, i] = Complex.new(datum.real, -datum.imag)\n          end\n        end\n      end\n\n      file.close\n\n      mat\n    end\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/io/mat5_reader.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io/matlab/mat5_reader.rb\n#\n# Matlab version 5 .mat file reader (and eventually writer too).\n#\n#++\n\nrequire_relative './mat_reader.rb'\n\nmodule NMatrix::IO::Matlab\n\n  # Reader (and eventual writer) for a version 5 .mat file.\n  class Mat5Reader < MatReader #:nodoc:\n    attr_reader :file_header, :first_tag_field, :first_data_field\n\n    class Compressed #:nodoc:\n      include Packable\n\n      attr_reader :byte_order\n\n      def initialize(stream = nil, byte_order = nil, content_or_bytes = nil)\n        @stream   = stream\n        @byte_order = byte_order\n\n        if content_or_bytes.is_a?(String)\n          @content = content_or_bytes\n\n        elsif content_or_bytes.is_a?(Integer)\n          @padded_bytes = content_or_bytes\n        end\n      end\n\n      def compressed\n        require \"zlib\"\n        # [2..-5] removes headers\n        @compressed ||= Zlib::Deflate.deflate(content)\n      end\n\n      def content\n        @content ||= extract\n      end\n\n      def padded_bytes\n        @padded_bytes ||= content.size % 4 == 0 ? 
\\\n         content.size : (content.size / 4 + 1) * 4\n      end\n\n      def write_packed(packedio, options = {})\n        packedio << [compressed, {:bytes => padded_bytes}.merge(options)]\n      end\n\n      def read_packed(packedio, options)\n        @compressed = (packedio >> [String, options]).first\n        content\n      end\n\n      protected\n      def extract\n        require 'zlib'\n\n        zstream = Zlib::Inflate.new #(-Zlib::MAX_WBITS) # No header\n\n        returning(zstream.inflate(@compressed)) do\n          zstream.finish\n          zstream.close\n        end\n      end\n    end\n\n    MatrixDataStruct = Struct.new(\n                                  :cells, :logical, :global, :complex,\n                                  :nonzero_max,:matlab_class, :dimensions,\n                                  :matlab_name, :real_part,:imaginary_part,\n                                  :row_index, :column_index)\n\n    class MatrixData < MatrixDataStruct #:nodoc:\n      include Packable\n\n      def write_packed(packedio, options)\n        raise NotImplementedError\n        packedio << [info, {:bytes => padded_bytes}.merge(options)]\n      end\n\n      # call-seq:\n      #     to_ruby -> NMatrix\n      #     to_ruby -> Array\n      #\n      # Figure out the appropriate Ruby type to convert to, and do it. There\n      # are basically two possible types: +NMatrix+ and +Array+. 
This method\n      # is recursive, so an +Array+ is going to contain other +Array+s and/or\n      # +NMatrix+ objects.\n      #\n      # mxCELL types (cells) will be converted to the Array type.\n      #\n      # mxSPARSE and other types will be converted to NMatrix, with the\n      # appropriate stype (:yale or :dense, respectively).\n      #\n      # See also to_nm, which is responsible for NMatrix instantiation.\n      def to_ruby\n        case matlab_class\n        when :mxSPARSE then return to_nm\n        when :mxCELL  then return self.cells.collect { |c| c.to_ruby }\n        else         return to_nm\n        end\n      end\n\n      # call-seq:\n      #     guess_dtype_from_mdtype -> Symbol\n      #\n      # Try to determine what dtype and such to use.\n      #\n      # TODO: Needs to be verified that unsigned MATLAB types are being\n      # converted to the correct NMatrix signed dtypes.\n      def guess_dtype_from_mdtype\n        dtype = MatReader::MDTYPE_TO_DTYPE[self.real_part.tag.data_type]\n\n        return dtype unless self.complex\n\n        dtype == :float32 ? :complex64 : :complex128\n      end\n\n      #\n      # call-seq:\n      #     unpacked_data(real_mdtype = nil, imag_mdtype = nil) ->\n      #\n      # Unpacks data without repacking it.\n      #\n      # Used only for dense matrix creation. 
Yale matrix creation uses\n      # repacked_data.\n      #\n      def unpacked_data(real_mdtype = nil, imag_mdtype = nil)\n        # Get Matlab data type and unpack args\n        real_mdtype ||= self.real_part.tag.data_type\n        real_unpack_args = MatReader::MDTYPE_UNPACK_ARGS[real_mdtype]\n\n        # zip real and complex components together, or just return real component\n        if self.complex\n          imag_mdtype ||= self.imaginary_part.tag.data_type\n          imag_unpack_args = MatReader::MDTYPE_UNPACK_ARGS[imag_mdtype]\n\n          unpacked_real = self.real_part.data.unpack(real_unpack_args)\n          unpacked_imag = self.imaginary_part.data.unpack(imag_unpack_args)\n\n          unpacked_real.zip(unpacked_imag).flatten\n        else\n          length = self.dimensions.inject(1) { |a,b| a * b } # get the product\n          self.real_part.data.unpack(*(real_unpack_args*length))\n        end\n\n      end\n\n      # Unpacks and repacks data into the appropriate format for NMatrix.\n      #\n      # If data is already in the appropriate format, does not unpack or\n      # repack, just returns directly.\n      #\n      # Complex is always unpacked and repacked, as the real and imaginary\n      # components must be merged together (MATLAB stores them separately for\n      # some crazy reason).\n      #\n      # Used only for Yale storage creation. For dense, see unpacked_data.\n      #\n      # This function calls repack and complex_merge, which are both defined in\n      # io.cpp.\n      def repacked_data(to_dtype = nil)\n\n        real_mdtype = self.real_part.tag.data_type\n\n        # Figure out what dtype to use based on the MATLAB data-types\n        # (mdtypes). 
They could be different for real and imaginary, so call\n        # upcast to figure out what to use.\n\n        components = [] # real and imaginary parts or just the real part\n\n        if self.complex\n          imag_mdtype = self.imaginary_part.tag.data_type\n\n          # Make sure we convert both mdtypes do the same dtype\n          to_dtype ||= NMatrix.upcast(MatReader::MDTYPE_TO_DTYPE[real_mdtype], \\\n           MatReader::MDTYPE_TO_DTYPE[imag_mdtype])\n\n          # Let's make sure we don't try to send NMatrix complex integers.\n          #  We need complex floating points.\n          unless [:float32, :float64].include?(to_dtype)\n            to_dtype = NMatrix.upcast(to_dtype, :float32)\n          end\n\n          STDERR.puts \"imag: Requesting dtype #{to_dtype.inspect}\"\n          # Repack the imaginary part\n          components[1] = ::NMatrix::IO::Matlab.repack( self.imaginary_part.data, \\\n           imag_mdtype, :dtype => to_dtype )\n\n        else\n\n          to_dtype ||= MatReader::MDTYPE_TO_DTYPE[real_mdtype]\n\n          # Sometimes repacking isn't necessary -- sometimes the format is already good\n          if MatReader::NO_REPACK.include?(real_mdtype)\n            STDERR.puts \"No repack\"\n            return [self.real_part.data, to_dtype]\n          end\n\n        end\n\n        # Repack the real part\n        STDERR.puts \"real: Requesting dtype #{to_dtype.inspect}\"\n        components[0] = ::NMatrix::IO::Matlab.repack( \\\n         self.real_part.data, real_mdtype, :dtype => to_dtype )\n\n        # Merge the two parts if complex, or just return the real part.\n        [self.complex ? 
::NMatrix::IO::Matlab.complex_merge( \\\n         components[0], components[1], to_dtype ) : components[0],\n         to_dtype]\n      end\n\n      # Unpacks and repacks index data into the appropriate format for NMatrix.\n      #\n      # If data is already in the appropriate format, does not unpack or\n      # repack, just returns directly.\n      def repacked_indices\n        repacked_row_indices = ::NMatrix::IO::Matlab.repack( \\\n         self.row_index.data, :miINT32, :itype )\n        repacked_col_indices = ::NMatrix::IO::Matlab.repack( \\\n         self.column_index.data, :miINT32, :itype )\n\n        [repacked_row_indices, repacked_col_indices]\n      end\n\n      #\n      # call-seq:\n      #     to_nm(dtype = nil) -> NMatrix\n      #\n      # Create an NMatrix from a MATLAB .mat (v5) matrix.\n      #\n      # This function matches the storage type exactly. That is, a regular\n      # matrix in MATLAB will be a dense NMatrix, and a sparse (old Yale) one\n      # in MATLAB will be a :yale (new Yale) matrix in NMatrix.\n      #\n      # Note that NMatrix has no old Yale type, so this uses a semi-hidden\n      # version of the NMatrix constructor to pass in --- as directly as\n      # possible -- the stored bytes in a MATLAB sparse matrix. This\n      # constructor should also be used for other IO formats that want to\n      # create sparse matrices from IA and JA vectors (e.g., SciPy).\n      #\n      # This is probably not the fastest code. An ideal solution would be a C\n      # plugin of some sort for reading the MATLAB .mat file. 
However, .mat v5\n      # is a really complicated format, and lends itself to an object-oriented\n      # solution.\n      #\n      def to_nm(dtype = nil)\n        # Hardest part is figuring out from_dtype, from_index_dtype, and dtype.\n        dtype   ||= guess_dtype_from_mdtype\n        from_dtype = MatReader::MDTYPE_TO_DTYPE[self.real_part.tag.data_type]\n\n        # Create the same kind of matrix that MATLAB saved.\n        case matlab_class\n        when :mxSPARSE\n          raise(NotImplementedError, \"expected .mat row indices to be of type :miINT32\") unless row_index.tag.data_type == :miINT32\n          raise(NotImplementedError, \"expected .mat column indices to be of type :miINT32\") unless column_index.tag.data_type == :miINT32\n          #require 'pry'\n          #binding.pry\n\n          # MATLAB always uses :miINT32 for indices according to the spec\n          ia_ja                     = repacked_indices\n          data_str, repacked_dtype  = repacked_data(dtype)\n          NMatrix.new(:yale, self.dimensions.reverse, repacked_dtype, \\\n           ia_ja[0], ia_ja[1], data_str, repacked_dtype)\n\n        else\n          # Call regular dense constructor.\n          NMatrix.new(:dense, self.dimensions.reverse, unpacked_data, dtype).transpose\n        end\n      end\n\n      def read_packed(packedio, options)\n        flags_class, self.nonzero_max = packedio.read([Element, options]).data\n\n        self.matlab_class   = MatReader::MCLASSES[flags_class % 16]\n\n        self.logical        = (flags_class >> 8) % 2 == 1 ? true : false\n        self.global         = (flags_class >> 9) % 2 == 1 ? true : false\n        self.complex        = (flags_class >> 10) % 2 == 1 ? true : false\n\n        dimensions_tag_data = packedio.read([Element, options])\n        self.dimensions     = dimensions_tag_data.data\n\n        begin\n          name_tag_data   = packedio.read([Element, options])\n          self.matlab_name = name_tag_data.data.is_a?(Array) ? 
\\\n           name_tag_data.data.collect { |i| i.chr }.join('') : \\\n           name_tag_data.data.chr\n\n        rescue ElementDataIOError => e\n          STDERR.puts \"ERROR: Failure while trying to read Matlab variable name: #{name_tag_data.inspect}\"\n          STDERR.puts 'Element Tag:'\n          STDERR.puts \"    #{e.tag}\"\n          STDERR.puts 'Previously, I read these dimensions:'\n          STDERR.puts \"    #{dimensions_tag_data.inspect}\"\n          STDERR.puts \"Unpack options were: #{options.inspect}\"\n          raise(e)\n        end\n\n        if self.matlab_class == :mxCELL\n          # Read what may be a series of matrices\n          self.cells = []\n          STDERR.puts(\"Warning: Cell array does not yet support reading multiple dimensions\") if dimensions.size > 2 || (dimensions[0] > 1 && dimensions[1] > 1)\n          number_of_cells = dimensions.inject(1) { |prod,i| prod * i }\n          number_of_cells.times { self.cells << \\\n           packedio.read([Element, options]) }\n\n        else\n          read_opts = [RawElement, {:bytes => options[:bytes], \\\n           :endian => :native}]\n\n          if self.matlab_class == :mxSPARSE\n            self.column_index = packedio.read(read_opts)\n            self.row_index    = packedio.read(read_opts)\n          end\n\n          self.real_part   = packedio.read(read_opts)\n          self.imaginary_part = packedio.read(read_opts) if self.complex\n        end\n      end\n\n      def ignore_padding(packedio, bytes)\n        packedio.read([Integer, {:unsigned => true, \\\n         :bytes => bytes}]) if bytes > 0\n      end\n    end\n\n\n    MDTYPE_UNPACK_ARGS =\n      MatReader::MDTYPE_UNPACK_ARGS.merge({\n                                            :miCOMPRESSED => [Compressed, {}],\n                                            :miMATRIX   => [MatrixData, {}]\n                                          })\n\n    FIRST_TAG_FIELD_POS = 128\n\n    ###################################\n    # Instance 
Methods for Mat5Reader #\n    ###################################\n\n    # call-seq:\n    #     NMatrix::IO::Mat5Reader.new(stream, options = {}) -> NMatrix\n    def initialize(stream, options = {})\n      super(stream, options)\n      @file_header = seek_and_read_file_header\n    end\n\n    def to_a\n      returning(Array.new) do |ary|\n        self.each { |el| ary << el }\n      end\n    end\n\n    def to_ruby\n      ary = self.to_a\n\n      if ary.size == 1\n        ary.first.to_ruby\n      else\n        ary.collect { |item| item.to_ruby }\n      end\n    end\n\n    def guess_byte_order\n      stream.seek(Header::BYTE_ORDER_POS)\n      mi = stream.read(Header::BYTE_ORDER_LENGTH)\n      stream.seek(0)\n      mi == 'IM' ? :little : :big\n    end\n\n    def seek_and_read_file_header\n      stream.seek(0)\n      stream.read(FIRST_TAG_FIELD_POS).unpack(Header, {:endian => byte_order})\n    end\n\n    def each(&block)\n      stream.each(Element, {:endian => byte_order}) do |element|\n        if element.data.is_a?(Compressed)\n          StringIO.new(element.data.content, 'rb').each(Element, \\\n             {:endian => byte_order}) do |compressed_element|\n            yield compressed_element.data\n          end\n\n        else\n          yield element.data\n        end\n      end\n\n      # Go back to the beginning in case we want to do it again.\n      stream.seek(FIRST_TAG_FIELD_POS)\n\n      self\n    end\n\n    # Internal Classes.\n\n    class Header < Struct.new(:desc, :data_offset, :version, :endian) #:nodoc:\n\n      include Packable\n\n      BYTE_ORDER_LENGTH  = 2\n      DESC_LENGTH     = 116\n      DATA_OFFSET_LENGTH = 8\n      VERSION_LENGTH   = 2\n      BYTE_ORDER_POS   = 126\n\n      # TODO: TEST WRITE.\n      def write_packed(packedio, options)\n        packedio << [desc,    {:bytes => DESC_LENGTH    }] <<\n          [data_offset, {:bytes => DATA_OFFSET_LENGTH }] <<\n          [version,   {:bytes => VERSION_LENGTH   }] <<\n          [byte_order, {:bytes 
=> BYTE_ORDER_LENGTH }]\n      end\n\n      def read_packed(packedio, options)\n        self.desc, self.data_offset, self.version, self.endian = packedio >>\n          [String, {:bytes => DESC_LENGTH                 }] >>\n          [String, {:bytes => DATA_OFFSET_LENGTH              }] >>\n          [Integer, {:bytes => VERSION_LENGTH, :endian => options[:endian] }] >>\n          [String, {:bytes => 2                      }]\n\n        self.desc.strip!\n        self.data_offset.strip!\n        self.data_offset = nil if self.data_offset.empty?\n\n        self.endian == 'IM' ? :little : :big\n      end\n    end\n\n    class Tag < Struct.new(:data_type, :raw_data_type, :bytes, :small) #:nodoc:\n      include Packable\n\n      DATA_TYPE_OPTS = BYTES_OPTS = {:bytes => 4, :signed => false}\n      LENGTH = DATA_TYPE_OPTS[:bytes] + BYTES_OPTS[:bytes]\n\n      # TODO: TEST WRITE.\n      def write_packed packedio, options\n        packedio << [data_type, DATA_TYPE_OPTS] << [bytes, BYTES_OPTS]\n      end\n\n      def small?\n        self.bytes > 0 and self.bytes <= 4\n      end\n\n      def size\n        small? ? 4 : 8\n      end\n\n      def read_packed packedio, options\n        self.raw_data_type = packedio.read([Integer, \\\n         DATA_TYPE_OPTS.merge(options)])\n\n        # Borrowed from a SciPy patch\n        upper = self.raw_data_type >> 16\n        lower = self.raw_data_type & 0xFFFF\n\n        if upper > 0\n          # Small data element format\n          raise IOError, 'Small data element format indicated, but length is more than 4 bytes!' if upper > 4\n\n          self.bytes     = upper\n          self.raw_data_type = lower\n\n        else\n          self.bytes = packedio.read([Integer, BYTES_OPTS.merge(options)])\n        end\n\n        self.data_type = MatReader::MDTYPES[self.raw_data_type]\n      end\n\n      def inspect\n        \"#<#{self.class.to_s} data_type=#{data_type}[#{raw_data_type}][#{raw_data_type.to_s(2)}] bytes=#{bytes} size=#{size}#{small? ? 
' small' : ''}>\"\n      end\n    end\n\n\n    class ElementDataIOError < IOError #:nodoc:\n      attr_reader :tag\n\n      def initialize(tag = nil, msg = nil)\n        @tag = tag\n        super msg\n      end\n\n      def to_s\n        @tag.inspect + \"\\n\" + super\n      end\n    end\n\n\n    class Element < Struct.new(:tag, :data) #:nodoc:\n      include Packable\n\n      def write_packed packedio, options\n        packedio << [tag, {}] << [data, {}]\n      end\n\n      def read_packed(packedio, options)\n        raise(ArgumentError, 'Missing mandatory option :endian.') \\\n         unless options.has_key?(:endian)\n\n        tag = packedio.read([Tag, {:endian => options[:endian]}])\n        data_type = MDTYPE_UNPACK_ARGS[tag.data_type]\n\n        self.tag = tag\n\n        raise ElementDataIOError.new(tag, \"Unrecognized Matlab type #{tag.raw_data_type}\") \\\n         if data_type.nil?\n\n        if tag.bytes == 0\n          self.data = []\n\n        else\n          number_of_reads = data_type[1].has_key?(:bytes) ? 
\\\n           tag.bytes / data_type[1][:bytes] : 1\n          data_type[1].merge!({:endian => options[:endian]})\n\n          if number_of_reads == 1\n            self.data = packedio.read(data_type)\n\n          else\n            self.data =\n              returning(Array.new) do |ary|\n              number_of_reads.times { ary << packedio.read(data_type) }\n            end\n          end\n\n          begin\n            ignore_padding(packedio, (tag.bytes + tag.size) % 8) \\\n             unless [:miMATRIX, :miCOMPRESSED].include?(tag.data_type)\n\n          rescue EOFError\n            STDERR.puts self.tag.inspect\n            raise(ElementDataIOError.new(tag, \"Ignored too much\"))\n          end\n        end\n      end\n\n      def ignore_padding(packedio, bytes)\n        if bytes > 0\n          #STDERR.puts \"Ignored #{8 - bytes} on #{self.tag.data_type}\"\n          ignored = packedio.read(8 - bytes)\n          ignored_unpacked = ignored.unpack(\"C*\")\n          raise(IOError, \"Nonzero padding detected: #{ignored_unpacked}\") \\\n           if ignored_unpacked.any? 
{ |i| i != 0 }\n        end\n      end\n\n      def to_ruby\n        data.to_ruby\n      end\n    end\n\n    # Doesn't unpack the contents of the element, e.g., if we want to handle\n    # manually, or pass the raw string of bytes into NMatrix.\n    class RawElement < Element #:nodoc:\n      def read_packed(packedio, options)\n        raise(ArgumentError, 'Missing mandatory option :endian.') \\\n         unless options.has_key?(:endian)\n\n        self.tag = packedio.read([Tag,   {:endian => options[:endian]}])\n        self.data = packedio.read([String, {:endian => options[:endian], \\\n         :bytes => tag.bytes }])\n\n        begin\n          ignore_padding(packedio, (tag.bytes + tag.size) % 8) \\\n           unless [:miMATRIX, :miCOMPRESSED].include?(tag.data_type)\n\n        rescue EOFError\n          STDERR.puts self.tag.inspect\n          raise ElementDataIOError.new(tag, 'Ignored too much.')\n        end\n      end\n    end\n\n    #####################\n    # End of Mat5Reader #\n    #####################\n\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/io/mat_reader.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io/mat_reader.rb\n#\n# Base class for .mat file reading (Matlab files).\n#\n#++\n\nrequire 'packable'\n\nmodule NMatrix::IO::Matlab\n\n  # Class for parsing a .mat file stream.\n  #\n  # The full format of .mat files is available here:\n  # * http://www.mathworks.com/help/pdf_doc/matlab/matfile_format.pdf\n  class MatReader #:nodoc:\n    MDTYPE_UNPACK_ARGS = {\n      :miINT8   => [Integer, {:signed    => true,    :bytes => 1}],\n      :miUINT8  => [Integer, {:signed    => false,   :bytes => 1}],\n      :miINT16  => [Integer, {:signed    => true,    :bytes => 2}],\n      :miUINT16 => [Integer, {:signed    => false,   :bytes => 2}],\n      :miINT32  => [Integer, {:signed    => true,    :bytes => 4}],\n      :miUINT32 => [Integer, {:signed    => false,   :bytes => 4}],\n      :miSINGLE => [Float,   {:precision => :single,\n                              :bytes => 4, :endian => :native}],\n      :miDOUBLE => [Float,   {:precision => :double,\n                              :bytes => 4, :endian => :native}],\n      :miINT64  => [Integer, {:signed    => true,    :bytes => 8}],\n      :miUINT64 => [Integer, {:signed    => false,   :bytes => 8}]\n    }\n\n    DTYPE_PACK_ARGS = {\n      :byte       => [Integer, {:signed => false,\n                                :bytes => 1}],\n      :int8     
  => [Integer, {:signed => true,\n                                :bytes => 1}],\n      :int16      => [Integer, {:signed => true,\n                                :bytes => 2}],\n      :int32      => [Integer, {:signed => true,\n                                :bytes => 4}],\n      :int64      => [Integer, {:signed => true,\n                                :bytes => 8}],\n      :float32    => [Float,   {:precision => :single,\n                                :bytes => 4, :endian => :native}],\n      :float64    => [Float,   {:precision => :double,\n                                :bytes => 8, :endian => :native}],\n      :complex64  => [Float,   {:precision => :single,\n                                :bytes => 4, :endian => :native}], #2x\n      :complex128 => [Float,   {:precision => :double,\n                                :bytes => 8, :endian => :native}]\n    }\n\n    ITYPE_PACK_ARGS = {\n      :uint8  => [Integer, {:signed => false, :bytes => 1}],\n      :uint16 => [Integer, {:signed => false, :bytes => 2}],\n      :uint32 => [Integer, {:signed => false, :bytes => 4}],\n      :uint64 => [Integer, {:signed => false, :bytes => 8}],\n    }\n\n    NO_REPACK = [:miINT8, :miUINT8, :miINT16,\n                 :miINT32, :miSINGLE, :miDOUBLE, :miINT64]\n\n    # Convert from MATLAB dtype to NMatrix dtype.\n    MDTYPE_TO_DTYPE = {\n      :miUINT8  => :byte,\n      :miINT8   => :int8,\n      :miINT16  => :int16,\n      :miUINT16 => :int16,\n      :miINT32  => :int32,\n      :miUINT32 => :int32,\n      :miINT64  => :int64,\n      :miUINT64 => :int64,\n      :miSINGLE => :float32,\n      :miDOUBLE => :float64\n    }\n\n    MDTYPE_TO_ITYPE = {\n      :miUINT8  => :uint8,\n      :miINT8   => :uint8,\n      :miINT16  => :uint16,\n      :miUINT16 => :uint16,\n      :miINT32  => :uint32,\n      :miUINT32 => :uint32,\n      :miINT64  => :uint64,\n      :miUINT64 => :uint64\n    }\n\n    # Before release v7.1 (release 14) matlab (TM) used the system\n    # default character 
encoding scheme padded out to 16-bits. Release 14\n    # and later use Unicode. When saving character data, R14 checks if it\n    # can be encoded in 7-bit ascii, and saves in that format if so.\n    MDTYPES = [\n               nil,\n               :miINT8,\n               :miUINT8,\n               :miINT16,\n               :miUINT16,\n               :miINT32,\n               :miUINT32,\n               :miSINGLE,\n               nil,\n               :miDOUBLE,\n               nil,\n               nil,\n               :miINT64,\n               :miUINT64,\n               :miMATRIX,\n               :miCOMPRESSED,\n               :miUTF8,\n               :miUTF16,\n               :miUTF32\n              ]\n\n    MCLASSES = [\n                nil,\n                :mxCELL,\n                :mxSTRUCT,\n                :mxOBJECT,\n                :mxCHAR,\n                :mxSPARSE,\n                :mxDOUBLE,\n                :mxSINGLE,\n                :mxINT8,\n                :mxUINT8,\n                :mxINT16,\n                :mxUINT16,\n                :mxINT32,\n                :mxUINT32,\n                :mxINT64,\n                :mxUINT64,\n                :mxFUNCTION,\n                :mxOPAQUE,\n                :mxOBJECT_CLASS_FROM_MATRIX_H\n               ]\n\n    attr_reader :byte_order\n\n\n    # call-seq:\n    #     new(stream, options = {}) -> MatReader\n    #\n    # * *Raises* :\n    #   - +ArgumentError+ -> First argument must be IO.\n    #\n    def initialize(stream, options = {})\n      raise ArgumentError, 'First arg must be IO.' unless stream.is_a?(::IO)\n\n      @stream     = stream\n      @byte_order = options[:byte_order] || guess_byte_order\n    end\n\n    # call-seq:\n    #     guess_byte_order -> Symbol\n    #\n    def guess_byte_order\n      # Assume native, since we don't know what type of file we have.\n      :native\n    end\n\n    protected\n\n    attr_reader :stream\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/io/point_cloud.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io/point_cloud.rb\n#\n# Point Cloud Library (PCL) PCD file IO functions.\n#\n#++\n\n# Reader for Point Cloud Data (PCD) file format.\n#\n# The documentation of this format can be found in:\n#\n# http://pointclouds.org/documentation/tutorials/pcd_file_format.php\n#\n# Note that this implementation does not take the width or height parameters\n# into account.\nmodule NMatrix::IO::PointCloud\n\n  # For UINT, just add 1 to the index.\n  INT_DTYPE_BY_SIZE   = [:int8, :int8, :int16, :int32, :int64, :int64] #:nodoc:\n  FLOAT_DTYPE_BY_SIZE = {4 => :float32, 8 => :float64} #:nodoc:\n\n  class << self\n    # call-seq:\n    #     load(filename) -> NMatrix\n    #\n    # * *Arguments* :\n    #   - +filename+ -> String giving the name of the file to be loaded.\n    #\n    # Load a Point Cloud Library PCD file as a matrix.\n    def load(filename)\n      MetaReader.new(filename).matrix\n    end\n  end\n\n  class MetaReader #:nodoc:\n    ENTRIES = [:version,  :fields,           :size,  :type,\n               :count,  :width,  :height,  :viewpoint,  :points,  :data]\n    ASSIGNS = [:version=, :fields=,          :size=, :type=,\n               :count=, :width=, :height=, :viewpoint=, :points=, :data=]\n    CONVERT = [:to_s,     :downcase_to_sym,  :to_i,  :downcase_to_sym,\n      :to_i,   :to_i,   
:to_i,    :to_f,       :to_i,    :downcase_to_sym]\n\n    DTYPE_CONVERT = {:byte => :to_i, :int8 => :to_i, :int16 => :to_i,\n           :int32 => :to_i, :float32 => :to_f, :float64 => :to_f}\n\n    # For UINT, just add 1 to the index.\n    INT_DTYPE_BY_SIZE   = {1 => :int8,    2 => :int16,   4 => :int32,\n       8 => :int64,  16 => :int64}\n    FLOAT_DTYPE_BY_SIZE = {1 => :float32, 2 => :float32, 4 => :float32,\n       8 => :float64,16 => :float64}\n\n    class << self\n\n      # Given a type and a number of bytes, figure out an appropriate dtype\n      def dtype_by_type_and_size t, s\n        if t == :f\n          FLOAT_DTYPE_BY_SIZE[s]\n        elsif t == :u\n          return :byte if s == 1\n          INT_DTYPE_BY_SIZE[s*2]\n        else\n          INT_DTYPE_BY_SIZE[s]\n        end\n      end\n    end\n\n    # call-seq:\n    #     PointCloudReader::MetaReader.new(filename) -> MetaReader\n    #\n    # * *Arguments* :\n    #   - +filename+ -> String giving the name of the file to be loaded.\n    # * *Raises* :\n    #   - +NotImplementedError+ -> only ASCII supported currently\n    #   - +IOError+ -> premature end of file\n    #\n    # Open a file and read the metadata at the top; then read the PCD into an\n    # NMatrix.\n    #\n    # In addition to the fields in the PCD file, there will be at least one\n    # additional attribute, :matrix, storing the data.\n    def initialize filename\n      f = File.new(filename, \"r\")\n\n      ENTRIES.each.with_index do |entry,i|\n        read_entry(f, entry, ASSIGNS[i], CONVERT[i])\n      end\n\n      raise(NotImplementedError, \"only ASCII supported currently\") \\\n       unless self.data.first == :ascii\n\n      @matrix = NMatrix.new(self.shape, dtype: self.dtype)\n\n      # Do we want to use to_i or to_f?\n      convert = DTYPE_CONVERT[self.dtype]\n\n      i = 0\n      while line = f.gets\n        @matrix[i,:*] = line.chomp.split.map { |f| f.send(convert) }\n        i += 1\n      end\n\n      raise(IOError, \"premature 
end of file\") if i < self.points[0]\n\n    end\n\n    attr_accessor *ENTRIES\n    attr_reader :matrix\n\n  protected\n    # Read the current entry of the header.\n    def read_entry f, entry, assign=nil, convert=nil\n      assign ||= (entry.to_s + \"=\").to_sym\n\n      while line = f.gets\n        next if line =~ /^\\s*#/ # ignore comment lines\n        line = line.chomp.split(/\\s*#/)[0] # ignore the comments after any data\n\n        # Split, remove the entry name, and convert to the correct type.\n        self.send(assign,\n                  line.split.tap { |t| t.shift }.map do |f|\n                    if convert.nil?\n                      f\n                    elsif convert == :downcase_to_sym\n                      f.downcase.to_sym\n                    else\n                      f.send(convert)\n                    end\n                  end)\n\n        # We don't really want to loop.\n        break\n      end\n\n      self.send(entry)\n    end\n\n\n    # Determine the dtype for a matrix based on the types and\n    #  sizes given in the PCD.\n    #  Call this only after read_entry has been called.\n    def dtype\n      @dtype ||= begin\n        dtypes = self.type.map.with_index do |t,k|\n          MetaReader.dtype_by_type_and_size(t, size[k])\n        end.sort.uniq\n\n        # This could probably save one comparison at most, but we assume that\n        # worst case isn't going to happen very often.\n        while dtypes.size > 1\n          d = NMatrix.upcast(dtypes[0], dtypes[1])\n          dtypes.shift\n          dtypes[0] = d\n        end\n\n        dtypes[0]\n      end\n    end\n\n    # Determine the shape of the matrix.\n    def shape\n      @shape ||= [\n          self.points[0],\n          self.fields.size\n      ]\n    end\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/jruby/decomposition.rb",
    "content": "class NMatrix\n\n  # discussion in https://github.com/SciRuby/nmatrix/issues/374\n\n  def matrix_solve rhs\n    if rhs.shape[1] > 1\n      nmatrix = NMatrix.new :copy\n      nmatrix.shape = rhs.shape\n      res = []\n      #Solve a matrix and store the vectors in a matrix\n      (0...rhs.shape[1]).each do |i|\n        res << self.solve(rhs.col(i)).s.toArray.to_a\n      end\n      #res is in col major format\n      result = ArrayGenerator.getArrayColMajorDouble res.to_java :double, rhs.shape[0], rhs.shape[1]\n      nmatrix.s = ArrayRealVector.new result\n\n      return nmatrix\n    else\n      return self.solve rhs\n    end\n  end\n\nend"
  },
  {
    "path": "lib/nmatrix/jruby/enumerable.rb",
    "content": "# Source: https://github.com/marcandre/backports/blob/master/lib/backports/rails/enumerable.rb\nmodule Enumerable\n  # Standard in rails... See official documentation[http://api.rubyonrails.org/classes/Enumerable.html]\n  # Modified from rails 2.3 to not rely on size\n  def sum(identity = 0, &block)\n    if block_given?\n      map(&block).sum(identity)\n    else\n      inject { |sum, element| sum + element } || identity\n    end\n  end unless method_defined? :sum\n\nend"
  },
  {
    "path": "lib/nmatrix/jruby/error.rb",
    "content": "DataTypeError = Class.new(StandardError)\nStorageTypeError = Class.new(StandardError)\nShapeError = Class.new(StandardError)\nNotInvertibleError = Class.new(StandardError)"
  },
  {
    "path": "lib/nmatrix/jruby/math.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == math.rb\n#\n# Math functionality for NMatrix, along with any NMatrix instance\n# methods that correspond to ATLAS/BLAS/LAPACK functions (e.g.,\n# laswp).\n#++\n\nclass NMatrix\n\n  #\n  # call-seq:\n  #     getrf! -> Array\n  #\n  # LU factorization of a general M-by-N matrix +A+ using partial pivoting with\n  # row interchanges. The LU factorization is A = PLU, where P is a row permutation\n  # matrix, L is a lower triangular matrix with unit diagonals, and U is an upper\n  # triangular matrix (note that this convention is different from the\n  # clapack_getrf behavior, but matches the standard LAPACK getrf).\n  # +A+ is overwritten with the elements of L and U (the unit\n  # diagonal elements of L are not saved). P is not returned directly and must be\n  # constructed from the pivot array ipiv. The row indices in ipiv are indexed\n  # starting from 1.\n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - The IPIV vector. The L and U matrices are stored in A.\n  # * *Raises* :\n  #   - +StorageTypeError+ -> ATLAS functions only work on dense matrices.\n  #\n  def getrf!\n    ipiv = LUDecomposition.new(self.twoDMat).getPivot.to_a\n    return ipiv\n  end\n\n  #\n  # call-seq:\n  #     geqrf! 
-> shape.min x 1 NMatrix\n  #\n  # QR factorization of a general M-by-N matrix +A+.\n  #\n  # The QR factorization is A = QR, where Q is orthogonal and R is Upper Triangular\n  # +A+ is overwritten with the elements of R and Q with Q being represented by the\n  # elements below A's diagonal and an array of scalar factors in the output NMatrix.\n  #\n  # The matrix Q is represented as a product of elementary reflectors\n  #     Q = H(1) H(2) . . . H(k), where k = min(m,n).\n  #\n  # Each H(i) has the form\n  #\n  #     H(i) = I - tau * v * v'\n  #\n  # http://www.netlib.org/lapack/explore-html/d3/d69/dgeqrf_8f.html\n  #\n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - Vector TAU. Q and R are stored in A. Q is represented by TAU and A\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #\n  def geqrf!\n    # The real implementation is in lib/nmatrix/lapacke.rb\n    raise(NotImplementedError, \"geqrf! requires the nmatrix-lapacke gem\")\n  end\n\n  #\n  # call-seq:\n  #     ormqr(tau) -> NMatrix\n  #     ormqr(tau, side, transpose, c) -> NMatrix\n  #\n  # Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization.\n  # +c+ is overwritten with the elements of the result NMatrix if supplied. 
Q is the orthogonal matrix\n  # represented by tau and the calling NMatrix\n  #\n  # Only works on float types, use unmqr for complex types.\n  #\n  # == Arguments\n  #\n  # * +tau+ - vector containing scalar factors of elementary reflectors\n  # * +side+ - direction of multiplication [:left, :right]\n  # * +transpose+ - apply Q with or without transpose [false, :transpose]\n  # * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity\n  #\n  # * *Returns* :\n  #\n  #   - Q * c or c * Q Where Q may be transposed before multiplication.\n  #\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #   - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types\n  #   - +TypeError+ -> c must have the same dtype as the calling NMatrix\n  #\n  def ormqr(tau, side=:left, transpose=false, c=nil)\n    # The real implementation is in lib/nmatrix/lapacke.rb\n    raise(NotImplementedError, \"ormqr requires the nmatrix-lapacke gem\")\n\n  end\n\n  #\n  # call-seq:\n  #     unmqr(tau) -> NMatrix\n  #     unmqr(tau, side, transpose, c) -> NMatrix\n  #\n  # Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization.\n  # +c+ is overwritten with the elements of the result NMatrix if it is supplied. 
Q is the orthogonal matrix\n  # represented by tau and the calling NMatrix\n  #\n  # Only works on complex types, use ormqr for float types.\n  #\n  # == Arguments\n  #\n  # * +tau+ - vector containing scalar factors of elementary reflectors\n  # * +side+ - direction of multiplication [:left, :right]\n  # * +transpose+ - apply Q as Q or its complex conjugate [false, :complex_conjugate]\n  # * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity\n  #\n  # * *Returns* :\n  #\n  #   - Q * c or c * Q Where Q may be transformed to its complex conjugate before multiplication.\n  #\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #   - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types\n  #   - +TypeError+ -> c must have the same dtype as the calling NMatrix\n  #\n  def unmqr(tau, side=:left, transpose=false, c=nil)\n    # The real implementation is in lib/nmatrix/lapacke.rb\n    raise(NotImplementedError, \"unmqr requires the nmatrix-lapacke gem\")\n  end\n\n  #\n  # call-seq:\n  #     potrf!(upper_or_lower) -> NMatrix\n  #\n  # Cholesky factorization of a symmetric positive-definite matrix -- or, if complex,\n  # a Hermitian positive-definite matrix +A+.\n  # The result will be written in either the upper or lower triangular portion of the\n  # matrix, depending on whether the argument is +:upper+ or +:lower+.\n  # Also the function only reads in the upper or lower part of the matrix,\n  # so it doesn't actually have to be symmetric/Hermitian.\n  # However, if the matrix (i.e. 
the symmetric matrix implied by the lower/upper\n  # half) is not positive-definite, the function will return nonsense.\n  #\n  # This functions requires either the nmatrix-atlas or nmatrix-lapacke gem\n  # installed.\n  #\n  # * *Returns* :\n  #   the triangular portion specified by the parameter\n  # * *Raises* :\n  #   - +StorageTypeError+ -> ATLAS functions only work on dense matrices.\n  #   - +ShapeError+ -> Must be square.\n  #   - +NotImplementedError+ -> If called without nmatrix-atlas or nmatrix-lapacke gem\n  #\n  def potrf!(which)\n    # The real implementation is in the plugin files.\n    cholesky = CholeskyDecomposition.new(self.twoDMat)\n    if which == :upper\n      u = create_dummy_nmatrix\n      twoDMat = cholesky.getLT\n      u.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n      return u\n    else\n      l = create_dummy_nmatrix\n      twoDMat = cholesky.getL\n      l.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n      return l\n    end\n  end\n\n  def potrf_upper!\n    potrf! :upper\n  end\n\n  def potrf_lower!\n    potrf! :lower\n  end\n\n\n  #\n  # call-seq:\n  #     factorize_cholesky -> [upper NMatrix, lower NMatrix]\n  #\n  # Calculates the Cholesky factorization of a matrix and returns the\n  # upper and lower matrices such that A=LU and L=U*, where * is\n  # either the transpose or conjugate transpose.\n  #\n  # Unlike potrf!, this makes method requires that the original is matrix is\n  # symmetric or Hermitian. 
However, it is still your responsibility to make\n  # sure it is positive-definite.\n  def factorize_cholesky\n    # raise \"Matrix must be symmetric/Hermitian for Cholesky factorization\" unless self.hermitian?\n    cholesky = CholeskyDecomposition.new(self.twoDMat)\n    l = create_dummy_nmatrix\n    twoDMat = cholesky.getL\n    l.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n    u = create_dummy_nmatrix\n    twoDMat = cholesky.getLT\n    u.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n    return [u,l]\n  end\n\n  #\n  # call-seq:\n  #     factorize_lu -> ...\n  #\n  # LU factorization of a matrix. Optionally return the permutation matrix.\n  #   Note that computing the permutation matrix will introduce a slight memory\n  #   and time overhead.\n  #\n  # == Arguments\n  #\n  # +with_permutation_matrix+ - If set to *true* will return the permutation\n  #   matrix alongwith the LU factorization as a second return value.\n  #\n  def factorize_lu with_permutation_matrix=nil\n    raise(NotImplementedError, \"only implemented for dense storage\") unless self.stype == :dense\n    raise(NotImplementedError, \"matrix is not 2-dimensional\") unless self.dimensions == 2\n    t = self.clone\n    pivot = create_dummy_nmatrix\n    twoDMat = LUDecomposition.new(self.twoDMat).getP\n    pivot.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n    return [t,pivot]\n  end\n\n  #\n  # call-seq:\n  #     factorize_qr -> [Q,R]\n  #\n  # QR factorization of a matrix without column pivoting.\n  # Q is orthogonal and R is upper triangular if input is square or upper trapezoidal if\n  # input is rectangular.\n  #\n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - Array containing Q and R matrices\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented for desnse storage.\n  #   - +ShapeError+ -> Input must be a 
2-dimensional matrix to have a QR decomposition.\n  #\n  def factorize_qr\n\n    raise(NotImplementedError, \"only implemented for dense storage\") unless self.stype == :dense\n    raise(ShapeError, \"Input must be a 2-dimensional matrix to have a QR decomposition\") unless self.dim == 2\n    qrdecomp = QRDecomposition.new(self.twoDMat)\n\n    qmat = create_dummy_nmatrix\n    qtwoDMat = qrdecomp.getQ\n    qmat.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(qtwoDMat.getData, @shape[0], @shape[1]))\n\n    rmat = create_dummy_nmatrix\n    rtwoDMat = qrdecomp.getR\n    rmat.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(rtwoDMat.getData, @shape[0], @shape[1]))\n    return [qmat,rmat]\n\n  end\n\n  # Solve the matrix equation AX = B, where A is +self+, B is the first\n  # argument, and X is returned. A must be a nxn square matrix, while B must be\n  # nxm. Only works with dense matrices and non-integer, non-object data types.\n  #\n  # == Arguments\n  #\n  # * +b+ - the right hand side\n  #\n  # == Options\n  #\n  # * +form+ - Signifies the form of the matrix A in the linear system AX=B.\n  #   If not set then it defaults to +:general+, which uses an LU solver.\n  #   Other possible values are +:lower_tri+, +:upper_tri+ and +:pos_def+ (alternatively,\n  #   non-abbreviated symbols +:lower_triangular+, +:upper_triangular+,\n  #   and +:positive_definite+ can be used.\n  #   If +:lower_tri+ or +:upper_tri+ is set, then a specialized linear solver for linear\n  #   systems AX=B with a lower or upper triangular matrix A is used. 
If +:pos_def+ is chosen,\n  #   then the linear system is solved via the Cholesky factorization.\n  #   Note that when +:lower_tri+ or +:upper_tri+ is used, then the algorithm just assumes that\n  #   all entries in the lower/upper triangle of the matrix are zeros without checking (which\n  #   can be useful in certain applications).\n  #\n  #\n  # == Usage\n  #\n  #   a = NMatrix.new [2,2], [3,1,1,2], dtype: dtype\n  #   b = NMatrix.new [2,1], [9,8], dtype: dtype\n  #   a.solve(b)\n  #\n  #   # solve an upper triangular linear system more efficiently:\n  #   require 'benchmark'\n  #   require 'nmatrix/lapacke'\n  #   rand_mat = NMatrix.random([10000, 10000], dtype: :float64)\n  #   a = rand_mat.triu\n  #   b = NMatrix.random([10000, 10], dtype: :float64)\n  #   Benchmark.bm(10) do |bm|\n  #     bm.report('general') { a.solve(b) }\n  #     bm.report('upper_tri') { a.solve(b, form: :upper_tri) }\n  #   end\n  #   #                   user     system      total        real\n  #   #  general     73.170000   0.670000  73.840000 ( 73.810086)\n  #   #  upper_tri    0.180000   0.000000   0.180000 (  0.182491)\n  #\n  def solve(b, opts = {})\n    raise(ShapeError, \"Must be called on square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    raise(ShapeError, \"number of rows of b must equal number of cols of self\") if\n      self.shape[1] != b.shape[0]\n    raise(ArgumentError, \"only works with dense matrices\") if self.stype != :dense\n    raise(ArgumentError, \"only works for non-integer, non-object dtypes\") if\n      integer_dtype? or object_dtype? or b.integer_dtype? 
or b.object_dtype?\n\n    opts = { form: :general }.merge(opts)\n    x    = b.clone\n    n    = self.shape[0]\n    nrhs = b.shape[1]\n\n    nmatrix = create_dummy_nmatrix\n    case opts[:form]\n    when :general, :upper_tri, :upper_triangular, :lower_tri, :lower_triangular\n      #LU solver\n      solver = LUDecomposition.new(self.twoDMat).getSolver\n      nmatrix.s = solver.solve(b.s)\n      return nmatrix\n    when :pos_def, :positive_definite\n      solver = CholeskyDecomposition.new(self.twoDMat).getSolver\n      nmatrix.s = solver.solve(b.s)\n      return nmatrix\n    else\n      raise(ArgumentError, \"#{opts[:form]} is not a valid form option\")\n    end\n\n  end\n\n  #\n  # call-seq:\n  #     det -> determinant\n  #\n  # Calculate the determinant by way of LU decomposition. This is accomplished\n  # using clapack_getrf, and then by taking the product of the diagonal elements. There is a\n  # risk of underflow/overflow.\n  #\n  # There are probably also more efficient ways to calculate the determinant.\n  # This method requires making a copy of the matrix, since clapack_getrf\n  # modifies its input.\n  #\n  # For smaller matrices, you may be able to use +#det_exact+.\n  #\n  # This function is guaranteed to return the same type of data in the matrix\n  # upon which it is called.\n  #\n  # Integer matrices are converted to floating point matrices for the purposes of\n  # performing the calculation, as xGETRF can't work on integer matrices.\n  #\n  # * *Returns* :\n  #   - The determinant of the matrix. It's the same type as the matrix's dtype.\n  # * *Raises* :\n  #   - +ShapeError+ -> Must be used on square matrices.\n  #\n  def det\n    raise(ShapeError, \"determinant can be calculated only for square matrices\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    self.det_exact2\n  end\n\n  #\n  # call-seq:\n  #     complex_conjugate -> NMatrix\n  #     complex_conjugate(new_stype) -> NMatrix\n  #\n  # Get the complex conjugate of this matrix. 
See also complex_conjugate! for\n  # an in-place operation (provided the dtype is already +:complex64+ or\n  # +:complex128+).\n  #\n  # Doesn't work on list matrices, but you can optionally pass in the stype you\n  # want to cast to if you're dealing with a list matrix.\n  #\n  # * *Arguments* :\n  #   - +new_stype+ -> stype for the new matrix.\n  # * *Returns* :\n  #   - If the original NMatrix isn't complex, the result is a +:complex128+ NMatrix. Otherwise, it's the original dtype.\n  #\n  def complex_conjugate(new_stype = self.stype)\n    self.cast(new_stype, NMatrix::upcast(dtype, :complex64)).complex_conjugate!\n  end\n\n  #\n  # call-seq:\n  #     conjugate_transpose -> NMatrix\n  #\n  # Calculate the conjugate transpose of a matrix. If your dtype is already\n  # complex, this should only require one copy (for the transpose).\n  #\n  # * *Returns* :\n  #   - The conjugate transpose of the matrix as a copy.\n  #\n  def conjugate_transpose\n    self.transpose.complex_conjugate!\n  end\n\n  #\n  # call-seq:\n  #     absolute_sum -> Numeric\n  #\n  # == Arguments\n  #   - +incx+ -> the skip size (defaults to 1, no skip)\n  #   - +n+ -> the number of elements to include\n  #\n  # Return the sum of the contents of the vector. This is the BLAS asum routine.\n  def asum incx=1, n=nil\n    if self.shape == [1]\n      return self[0].abs unless self.complex_dtype?\n      return self[0].real.abs + self[0].imag.abs\n    end\n    return method_missing(:asum, incx, n) unless vector?\n    NMatrix::BLAS::asum(self, incx, self.size / incx)\n  end\n  alias :absolute_sum :asum\n\n  #\n  # call-seq:\n  #     norm2 -> Numeric\n  #\n  # == Arguments\n  #   - +incx+ -> the skip size (defaults to 1, no skip)\n  #   - +n+ -> the number of elements to include\n  #\n  # Return the 2-norm of the vector. This is the BLAS nrm2 routine.\n  def nrm2 incx=1, n=nil\n    self.twoDMat.getFrobeniusNorm()\n  end\n  alias :norm2 :nrm2\n\n  #\n  # call-seq:\n  #     scale! 
-> NMatrix\n  #\n  # == Arguments\n  #   - +alpha+ -> Scalar value used in the operation.\n  #   - +inc+ -> Increment used in the scaling function. Should generally be 1.\n  #   - +n+ -> Number of elements of +vector+.\n  #\n  # This is a destructive method, modifying the source NMatrix.  See also #scale.\n  # Return the scaling result of the matrix. BLAS scal will be invoked if provided.\n\n  def scale!(alpha, incx=1, n=nil)\n    #FIXME\n    # raise(DataTypeError, \"Incompatible data type for the scaling factor\") unless\n    #     NMatrix::upcast(self.dtype, NMatrix::min_dtype(alpha)) == self.dtype\n    raise(DataTypeError, \"Incompatible data type for the scaling factor\") if\n        self.dtype == :int8\n    @s.mapMultiplyToSelf(alpha)\n    return self\n  end\n\n  #\n  # call-seq:\n  #     scale -> NMatrix\n  #\n  # == Arguments\n  #   - +alpha+ -> Scalar value used in the operation.\n  #   - +inc+ -> Increment used in the scaling function. Should generally be 1.\n  #   - +n+ -> Number of elements of +vector+.\n  #\n  # Return the scaling result of the matrix. BLAS scal will be invoked if provided.\n\n  def scale(alpha, incx=1, n=nil)\n    # FIXME\n    # raise(DataTypeError, \"Incompatible data type for the scaling factor\") unless\n    #     NMatrix::upcast(self.dtype, NMatrix::min_dtype(alpha)) == self.dtype\n    raise(DataTypeError, \"Incompatible data type for the scaling factor\") if\n        self.dtype == :byte || self.dtype == :int8 || self.dtype == :int16 ||\n        self.dtype == :int32 || self.dtype == :int64\n    nmatrix = NMatrix.new :copy\n    nmatrix.shape = @shape.clone\n    nmatrix.s = ArrayRealVector.new(@s.toArray.clone).mapMultiplyToSelf(alpha)\n    return nmatrix\n  end\n\nend\n"
  },
  {
    "path": "lib/nmatrix/jruby/nmatrix_java.rb",
    "content": "require 'java'\nrequire_relative '../../../ext/nmatrix_java/vendor/commons-math3-3.6.1.jar'\nrequire_relative '../../../ext/nmatrix_java/target/nmatrix.jar'\n\njava_import 'org.apache.commons.math3.linear.ArrayRealVector'\njava_import 'org.apache.commons.math3.linear.RealMatrix'\njava_import 'org.apache.commons.math3.linear.MatrixUtils'\njava_import 'org.apache.commons.math3.linear.DecompositionSolver'\njava_import 'org.apache.commons.math3.linear.LUDecomposition'\njava_import 'org.apache.commons.math3.linear.QRDecomposition'\njava_import 'org.apache.commons.math3.linear.CholeskyDecomposition'\njava_import 'MatrixGenerator'\njava_import 'ArrayGenerator'\njava_import 'MathHelper'\njava_import 'ArrayComparator'\n\nclass NMatrix\n  include_package 'org.apache.commons.math3.analysis.function'\n  attr_accessor :shape, :dim, :dtype, :stype, :s\n\n  def initialize(*args)\n    if args[-1] == :copy\n      @shape = [2,2]\n      @s = [0,0,0,0]\n      @dim = shape.is_a?(Array) ? shape.length : 2\n    else\n      if (args.length <= 3)\n        @shape = args[0]\n        if args[1].is_a?(Array)\n          elements = args[1]\n          if args.length > 2\n            hash = args[2]\n            # puts hash\n            @dtype = hash[:dtype]\n            @stype = hash[:stype]\n          else\n            @dtype = :float64\n            @stype = :dense\n          end\n        else\n          # elements = Java::double[shape[0]*shape[1]].new{ Java::Double.NaN }\n          if args.length > 1\n            if args[1].is_a?(Symbol)\n              hash = args[1]\n              @dtype = hash[:dtype]\n              @stype = hash[:stype]\n              elements = Array.new(shape*shape) unless shape.is_a? Array\n            else\n              elements = Array.new(shape*shape) unless shape.is_a? 
Array\n            end\n          end\n        end\n      else\n\n        offset = 0\n        if (!args[0].is_a?(Symbol) && !args[0].is_a?(String))\n          @stype = :dense\n        else\n          offset = 1\n          @stype = :dense\n          @dtype = args[-1]\n        end\n\n        @shape = args[offset]\n        elements = args[offset+1]\n\n      end\n\n\n      @shape = [shape,shape] unless shape.is_a?(Array)\n      # @dtype = interpret_dtype(argc-1-offset, argv+offset+1, stype);\n      # @dtype = args[:dtype] if args[:dtype]\n      @dtype_sym = nil\n      @stype_sym = nil\n      @default_val_num = nil\n      @capacity_num = nil\n      @size = (0...@shape.size).inject(1) { |x,i| x * @shape[i] }\n\n      j=0\n\n      if (elements.is_a?(ArrayRealVector))\n        @s = elements\n      # elsif elements.java_class.to_s == \"[D\"\n      #   @s = ArrayRealVector.new(elements)\n      else\n        storage = Array.new(size)\n        elements = [elements,elements] unless elements.is_a?(Array)\n        if size > elements.length\n          (0...size).each do |i|\n            j=0 unless j!=elements.length\n            storage[i] = elements[j]\n            j+=1\n          end\n        else\n          storage = elements\n        end\n        if @dtype == :object\n          @s = storage\n        else\n          @s = ArrayRealVector.new(storage.to_java Java::double)\n        end\n      end\n\n      @dim = @shape.is_a?(Array) ? 
@shape.length : 2\n\n    end\n  end\n\n  # Needs to be properly implemented\n  def self.min_dtype(alpha)\n    :int8\n  end\n\n  def self.upcast(alpha, beta)\n    false\n  end\n\n  def clone\n    result = create_dummy_nmatrix\n    # ArrayRealVector#clone is disable, hence use copy\n    # that returns a deep copy of the object.\n    result.s = @s.copy\n    return result\n  end\n\n  def entries\n    return @s.toArray.to_a\n  end\n\n  def twoDMat\n    return MatrixUtils.createRealMatrix MatrixGenerator.getMatrixDouble(self.s.toArray, @shape[0], @shape[1])\n  end\n\n  def dtype\n    return @dtype\n  end\n\n  #FIXME\n  def self.guess_dtype arg\n    :float32\n  end\n\n  def stype\n    @stype = :dense\n  end\n\n  def cast_full *args\n    if args.is_a? Hash\n      self.dtype = args[:dtype]\n    else\n      self.dtype = args[1]\n    end\n    return self\n  end\n\n  def default_value\n    return nil\n  end\n\n  def __list_default_value__\n    #not implemented currently\n  end\n\n  def __yale_default_value__\n    #not implemented currently\n  end\n\n  def [] *args\n    return xslice(args)\n  end\n\n  def slice(*args)\n    return xslice(args)\n  end\n\n  def []=(*args)\n    to_return = nil\n    if args.length > @dim+1\n      raise(ArgumentError, \"wrong number of arguments (#{args.length} for #{effective_dim(dim+1)})\" )\n    else\n      slice = get_slice(@dim, args, @shape)\n      dense_storage_set(slice, args[-1])\n      to_return = args[-1]\n    end\n    return to_return\n  end\n\n  def is_ref?\n\n  end\n\n  # def dim\n  #   shape.is_a?(Array) ? 
shape.length : 2\n  # end\n\n  alias :dimensions :dim\n\n  def effective_dim(s)\n    d = 0\n    (0...@dim).each do |i|\n      d+=1 unless @shape[i] == 1\n    end\n    return d\n  end\n\n  alias :effective_dimensions :effective_dim\n\n\n\n  protected\n\n  def create_dummy_nmatrix\n    nmatrix = NMatrix.new(:copy)\n    nmatrix.shape = self.shape\n    nmatrix.dim = self.dim\n    nmatrix.dtype = self.dtype\n    nmatrix.stype = self.stype\n    return nmatrix\n  end\n\n  def __list_to_hash__\n\n  end\n\n  public\n\n  def shape\n    @shape\n  end\n\n   def supershape s\n    if (s[:src] == @s)\n      return shape\n       # easy case (not a slice)\n    else\n      @s = s[:src]\n    end\n\n    new_shape = Array.new(dim)\n    (0...dim).each do |index|\n      new_shape[index] = shape[index]\n    end\n\n    return new_shape\n  end\n\n  def offset\n    # ArrayRealVector takes care of the offset value when indexing a Vector.\n    # Hence, return 0.\n    0\n  end\n\n  def det_exact\n    # if (:stype != :dense)\n    #   raise Exception.new(\"can only calculate exact determinant for dense matrices\")\n    #   return nil\n    # end\n    raise(DataTypeError, \"cannot call det_exact on unsigned type\") if(self.dtype == :byte)\n    if (@dim != 2 || @shape[0] != @shape[1])\n      raise(ShapeError, \"matrices must be square to have a determinant defined\")\n      return nil\n    end\n    to_return = nil\n    if (dtype == :object)\n      # to_return = *reinterpret_cast<VALUE*>(result);\n    else\n      to_return = LUDecomposition.new(self.twoDMat).getDeterminant()\n    end\n\n    return to_return.round(3)\n  end\n\n  def det_exact2\n    if (@dim != 2 || @shape[0] != @shape[1])\n      raise(ShapeError, \"matrices must be square to have a determinant defined\")\n      return nil\n    end\n    to_return = nil\n    if (dtype == :object)\n      # to_return = *reinterpret_cast<VALUE*>(result);\n    else\n      to_return = LUDecomposition.new(self.twoDMat).getDeterminant()\n    end\n\n    return 
to_return.round(3)\n  end\n\n  def complex_conjugate!\n\n  end\n\n\n  protected\n\n  def count_max_elements\n    return size\n  end\n\n  def reshape_bang arg\n    if(@stype == :dense)\n      shape_ary = arg\n      size = count_max_elements\n      new_size = 1\n      shape = interpret_shape(shape_ary, dim)\n\n      (0...dim).each do |index|\n        new_size *= shape[index]\n      end\n\n      if (size == new_size)\n        self.shape = shape\n        self.dim = dim\n        return self\n      else\n         raise(ArgumentError, \"reshape cannot resize; size of new and old matrices must match\")\n      end\n    else\n      raise(NotImplementedError, \"reshape in place only for dense stype\")\n    end\n  end\n\n  def interpret_shape(shape_ary, dim)\n    shape = []\n\n    if shape_ary.is_a?(Array)\n      dim = shape_ary.length\n\n      (0...dim).each do |index|\n        shape[index] = shape_ary[index].to_i\n      end\n\n    elsif shape_ary.is_a?(FIXNUM)\n      dim = 2\n      shape = Array.new(dim)\n      shape[0] = shape_ary.to_i\n      shape[1] = shape_ary.to_i\n    else\n      raise(ArgumentError, \"Expected an array of numbers or a single Fixnum for matrix shape\")\n    end\n\n    return shape\n  end\n\n\n  public\n\n  def each_with_indices\n    nmatrix = create_dummy_nmatrix\n    stride = get_stride(self)\n    offset = 0\n    #Create indices and initialize them to zero\n    coords = Array.new(dim){ 0 }\n\n    shape_copy =  Array.new(dim)\n    (0...size).each do |k|\n      dense_storage_coords(nmatrix, k, coords, stride, offset)\n      slice_index = dense_storage_pos(coords,stride)\n      ary = Array.new\n      if (@dtype == :object)\n        ary << self.s[slice_index]\n      else\n        ary << self.s.toArray.to_a[slice_index]\n      end\n      (0...dim).each do |p|\n        ary << coords[p]\n      end\n\n      # yield the array which now consists of the value and the indices\n      yield(ary)\n    end if block_given?\n\n    return nmatrix\n  end\n\n\n  def 
each_stored_with_indices\n    nmatrix = create_dummy_nmatrix\n    stride = get_stride(self)\n    offset = 0\n    #Create indices and initialize them to zero\n    coords = Array.new(dim){ 0 }\n\n    shape_copy =  Array.new(dim)\n\n    (0...size).each do |k|\n      dense_storage_coords(nmatrix, k, coords, stride, offset)\n      slice_index = dense_storage_pos(coords,stride)\n      ary = Array.new\n      if (@dtype == :object)\n        ary << self.s[slice_index]\n      else\n        ary << self.s.toArray.to_a[slice_index]\n      end\n      (0...dim).each do |p|\n        ary << coords[p]\n      end\n      # yield the array which now consists of the value and the indices\n      yield(ary)\n    end if block_given?\n\n    return nmatrix\n  end\n\n  def map_stored\n\n  end\n\n  def each_ordered_stored_with_indices\n\n  end\n\n\n  protected\n\n  def __dense_each__\n    nmatrix = create_dummy_nmatrix\n    stride = get_stride(self)\n    offset = 0\n    #Create indices and initialize them to zero\n    coords = Array.new(dim){ 0 }\n\n    shape_copy =  Array.new(dim)\n    (0...size).each do |k|\n      if (@dtype == :object)\n        dense_storage_coords(nmatrix, k, coords, stride, offset)\n        slice_index = dense_storage_pos(coords,stride)\n        yield self.s[slice_index]\n      else\n        dense_storage_coords(nmatrix, k, coords, stride, offset)\n        slice_index = dense_storage_pos(coords,stride)\n        yield self.s.toArray.to_a[slice_index]\n      end\n    end if block_given?\n    if (@dtype == :object)\n      return @s.to_enum\n    else\n      return @s.toArray().to_a.to_enum\n    end\n  end\n\n  def __dense_map__\n    nmatrix = create_dummy_nmatrix\n    stride = get_stride(self)\n    offset = 0\n    coords = Array.new(dim){ 0 }\n    shape_copy =  Array.new(dim)\n\n    s= Java::double[size].new\n    (0...size).each do |k|\n      dense_storage_coords(nmatrix, k, coords, stride, offset)\n      slice_index = dense_storage_pos(coords,stride)\n\n      y = yield 
@s.getEntry(slice_index)\n      @s.setEntry(slice_index, y)\n    end\n    nmatrix.s = ArrayRealVector.new s\n\n    return nmatrix\n  end\n\n  def __dense_map_pair__\n\n  end\n\n  def __list_map_merged_stored__\n\n  end\n\n  def __list_map_stored__\n\n  end\n\n  def __yale_map_merged_stored__\n\n  end\n\n  def __yale_map_stored__\n\n  end\n\n  def __yale_stored_diagonal_each_with_indices__\n\n  end\n\n  def __yale_stored_nondiagonal_each_with_indices__\n\n  end\n\n\n  public\n\n  def ==(otherNmatrix)\n    result = false\n    if (otherNmatrix.is_a?(NMatrix))\n      #check dimension\n      if (@dim != otherNmatrix.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != otherNmatrix.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n        end\n      end\n\n      #check the entries\n      if dtype == :object\n        result = @s == otherNmatrix.s\n      else\n        result = ArrayComparator.equals(@s.toArray, otherNmatrix.s.toArray)\n      end\n    end\n    result\n  end\n\n  def =~ (other)\n    lha = @s.toArray.to_a\n    rha = other.s.toArray.to_a\n    resultArray = Array.new(lha.length)\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@dim != other.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n        return nil\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != other.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n          return nil\n        end\n      end\n      #check the entries\n      (0...lha.length).each do |i|\n        resultArray[i] = lha[i] == rha[i] ? 
true : false\n      end\n      result = NMatrix.new(:copy)\n      result.shape = @shape\n      result.dtype = :object\n      result.s = resultArray\n    end\n    result\n  end\n\n  def !~ (other)\n    lha = @s.toArray.to_a\n    rha = other.s.toArray.to_a\n    resultArray = Array.new(lha.length)\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@dim != other.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n        return nil\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != other.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n          return nil\n        end\n      end\n      #check the entries\n      (0...lha.length).each do |i|\n        resultArray[i] = lha[i] != rha[i] ? true : false\n      end\n      result = NMatrix.new(:copy)\n      result.shape = @shape\n      result.dtype = :object\n      result.s = resultArray\n    end\n    result\n  end\n\n  def <= (other)\n    lha = @s.toArray.to_a\n    rha = other.s.toArray.to_a\n    resultArray = Array.new(lha.length)\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@dim != other.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n        return nil\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != other.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n          return nil\n        end\n      end\n      #check the entries\n      (0...lha.length).each do |i|\n        resultArray[i] = lha[i] <= rha[i] ? 
true : false\n      end\n      result = NMatrix.new(:copy)\n      result.shape = @shape\n      result.dtype = :object\n      result.s = resultArray\n    end\n    result\n  end\n\n  def >= (other)\n    lha = @s.toArray.to_a\n    rha = other.s.toArray.to_a\n    resultArray = Array.new(lha.length)\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@dim != other.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n        return nil\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != other.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n          return nil\n        end\n      end\n      #check the entries\n      (0...lha.length).each do |i|\n        resultArray[i] = lha[i] >= rha[i] ? true : false\n      end\n      result = NMatrix.new(:copy)\n      result.shape = @shape\n      result.dtype = :object\n      result.s = resultArray\n    end\n    result\n  end\n\n  def < (other)\n    lha = @s.toArray.to_a\n    rha = other.s.toArray.to_a\n    resultArray = Array.new(lha.length)\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@dim != other.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n        return nil\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != other.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n          return nil\n        end\n      end\n      #check the entries\n      (0...lha.length).each do |i|\n        resultArray[i] = lha[i] < rha[i] ? 
true : false\n      end\n      result = NMatrix.new(:copy)\n      result.shape = @shape\n      result.dtype = :object\n      result.s = resultArray\n    end\n    result\n  end\n\n  def > (other)\n    lha = @s.toArray.to_a\n    rha = other.s.toArray.to_a\n    resultArray = Array.new(lha.length)\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@dim != other.dim)\n        raise(ShapeError, \"cannot compare matrices with different dimension\")\n        return nil\n      end\n      #check shape\n      (0...dim).each do |i|\n        if (@shape[i] != other.shape[i])\n          raise(ShapeError, \"cannot compare matrices with different shapes\");\n          return nil\n        end\n      end\n      #check the entries\n      (0...lha.length).each do |i|\n        resultArray[i] = lha[i] > rha[i] ? true : false\n      end\n      result = NMatrix.new(:copy)\n      result.shape = @shape\n      result.dtype = :object\n      result.s = resultArray\n    end\n    result\n  end\n\n  # /////////////////////////\n  # // Matrix Math Methods //\n  # /////////////////////////\n\n  def dot(other)\n    result = nil\n    if (other.is_a?(NMatrix))\n      #check dimension\n      if (@shape.length!=2 || other.shape.length!=2)\n        raise(NotImplementedError, \"please convert array to nx1 or 1xn NMatrix first\")\n        return nil\n      end\n      #check shape\n      if (@shape[1] != other.shape[0])\n        raise(ArgumentError, \"incompatible dimensions\")\n        return nil\n      end\n\n      # if(@stype != other.stype)\n      #   raise(NotImplementedError, \"matrices must have same stype\")\n      # end\n\n      result = create_dummy_nmatrix\n      result.shape = [@shape[0],other.shape[1]]\n      twoDMat = self.twoDMat.multiply(other.twoDMat)\n      result.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0],other.shape[1]))\n    else\n      raise(ArgumentError, \"cannot have dot product with a scalar\");\n    end\n    return result;\n  
end\n\n  def symmetric?\n    return is_symmetric(false)\n  end\n\n  def is_symmetric(hermitian)\n    is_symmetric = true\n\n    if (@shape[0] == @shape[1] and @dim == 2)\n      if @stype == :dense\n        if (hermitian)\n          #Currently, we are not dealing with complex matrices.\n          eps = 0\n          is_symmetric = MatrixUtils.isSymmetric(self.twoDMat, eps)\n        else\n          eps = 0\n          is_symmetric = MatrixUtils.isSymmetric(self.twoDMat, eps)\n        end\n\n      else\n        #TODO: Implement, at the very least, yale_is_symmetric. Model it after yale/transp.template.c.\n        # raise Exception.new(\"symmetric? and hermitian? only implemented for dense currently\")\n      end\n    end\n    return is_symmetric ? true : false\n  end\n\n  def hermitian?\n    return is_symmetric(true)\n  end\n\n  def capacity\n\n  end\n\n  # // protected methods\n\n  protected\n\n  def __inverse__(matrix, bool =true)\n    # if (:stype != :dense)\n    #   raise Exception.new(\"needs exact determinant implementation for this matrix stype\")\n    #   return nil\n    # end\n\n    if (@dim != 2 || @shape[0] != @shape[1])\n      raise Exception.new(\"matrices must be square to have an inverse defined\")\n      return nil\n    end\n    to_return = nil\n    if (dtype == :RUBYOBJ)\n      # to_return = *reinterpret_cast<VALUE*>(result);\n    else\n      to_return = create_dummy_nmatrix\n      twoDMat = MatrixUtils.inverse(matrix.twoDMat)\n      to_return.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n    end\n\n    return to_return\n  end\n\n  def __inverse__!\n    # if (:stype != :dense)\n    #   raise Exception.new(\"needs exact determinant implementation for this matrix stype\")\n    #   return nil\n    # end\n\n    if (@dim != 2 || @shape[0] != @shape[1])\n      raise Exception.new(\"matrices must be square to have an inverse defined\")\n      return nil\n    end\n    to_return = nil\n    if (dtype == :RUBYOBJ)\n 
     # to_return = *reinterpret_cast<VALUE*>(result);\n    else\n      twoDMat = MatrixUtils.inverse(self.twoDMat)\n      @s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n    end\n\n    return self\n  end\n\n  def __inverse_exact__\n    # if (:stype != :dense)\n    #   raise Exception.new(\"needs exact determinant implementation for this matrix stype\")\n    #   return nil\n    # end\n\n    if (@dim != 2 || @shape[0] != @shape[1])\n      raise Exception.new(\"matrices must be square to have an inverse defined\")\n      return nil\n    end\n    to_return = nil\n    if (dtype == :RUBYOBJ)\n      # to_return = *reinterpret_cast<VALUE*>(result);\n    else\n      to_return = create_dummy_nmatrix\n      twoDMat = MatrixUtils.inverse(self.twoDMat)\n      to_return.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData, @shape[0], @shape[1]))\n    end\n\n    return to_return\n\n  end\n\n  private\n\n  # // private methods\n\n  def __hessenberg__(param)\n    raise(NotImplementedError, \"Hessenberg Transformer not implemented for NMatrix-JRuby\")\n  end\nend\n\n# load jruby implementation of operators.\nrequire_relative './slice.rb'\nrequire_relative './operators.rb'\nrequire_relative './decomposition.rb'\nrequire_relative './error.rb'\nrequire_relative './enumerable.rb'"
  },
  {
    "path": "lib/nmatrix/jruby/operators.rb",
    "content": "class NMatrix\n\n  # A dummy matrix is a matrix without the elements atrribute.\n  # NMatrix#create_dummy_matrix prevents creating copies as @s is set explicitly.\n  def +(other)\n    result = create_dummy_nmatrix\n    if (other.is_a?(NMatrix))\n      #check dimension\n      raise(ShapeError, \"Cannot add matrices with different dimension\") if (@dim != other.dim)\n      #check shape\n      (0...dim).each do |i|\n        raise(ShapeError, \"Cannot add matrices with different shapes\") if (@shape[i] != other.shape[i])\n      end\n      result.s = @s.copy.add(other.s)\n    else\n      result.s = @s.copy.mapAddToSelf(other)\n    end\n    result\n  end\n\n  def -(other)\n    result = create_dummy_nmatrix\n    if (other.is_a?(NMatrix))\n      #check dimension\n      raise(ShapeError, \"Cannot subtract matrices with different dimension\") if (@dim != other.dim)\n      #check shape\n      (0...dim).each do |i|\n        raise(ShapeError, \"Cannot subtract matrices with different shapes\") if (@shape[i] != other.shape[i])\n      end\n      result.s = @s.copy.subtract(other.s)\n    else\n      result.s = @s.copy.mapSubtractToSelf(other)\n    end\n    result\n  end\n\n  def *(other)\n    result = create_dummy_nmatrix\n    if (other.is_a?(NMatrix))\n      #check dimension\n      raise(ShapeError, \"Cannot multiply matrices with different dimension\") if (@dim != other.dim)\n      #check shape\n      (0...dim).each do |i|\n        raise(ShapeError, \"Cannot multiply matrices with different shapes\") if (@shape[i] != other.shape[i])\n      end\n      result.s = @s.copy.ebeMultiply(other.s)\n    else\n      result.s = @s.copy.mapMultiplyToSelf(other)\n    end\n    result\n  end\n\n  def /(other)\n    result = create_dummy_nmatrix\n    if (other.is_a?(NMatrix))\n      #check dimension\n      raise(ShapeError, \"Cannot divide matrices with different dimension\") if (@dim != other.dim)\n      #check shape\n      (0...dim).each do |i|\n        raise(ShapeError, 
\"Cannot divide matrices with different shapes\") if (@shape[i] != other.shape[i])\n      end\n      result.s = @s.copy.ebeDivide(other.s)\n    else\n      result.s = @s.copy.mapDivideToSelf(other)\n    end\n    result\n  end\n\n  def ** val\n    result = NMatrix.new(:copy)\n    result.shape = @shape\n    result.dim = @dim\n    result.s = @s.copy.mapToSelf(Power.new(val))\n    result\n  end\n\n  def %(other)\n    raise Exception.new(\"modulus not supported in NMatrix-jruby\")\n  end\n\n  def atan2(other, scalar=false)\n    result = create_dummy_nmatrix\n    if scalar\n      result.s = ArrayRealVector.new MathHelper.atan2Scalar(other, @s.toArray)\n    else\n      if other.is_a? NMatrix\n        result.s = ArrayRealVector.new MathHelper.atan2(other.s.toArray, @s.toArray)\n      else\n        result.s = ArrayRealVector.new MathHelper.atan2Scalar2(other, @s.toArray)\n      end\n    end\n    result\n  end\n\n  def ldexp(other, scalar=false)\n    result = create_dummy_nmatrix\n    if scalar\n      result.s = ArrayRealVector.new MathHelper.ldexpScalar(other, @s.toArray)\n    else\n      if other.is_a? NMatrix\n        result.s = ArrayRealVector.new MathHelper.ldexp(other.s.toArray, @s.toArray)\n      else\n        result.s = ArrayRealVector.new MathHelper.ldexpScalar2(other, @s.toArray)\n      end\n    end\n    result\n  end\n\n  def hypot(other, scalar=false)\n    result = create_dummy_nmatrix\n    if scalar\n      result.s = ArrayRealVector.new MathHelper.hypotScalar(other, @s.toArray)\n    else\n      if other.is_a? 
NMatrix\n        result.s = ArrayRealVector.new MathHelper.hypot(other.s.toArray, @s.toArray)\n      else\n        result.s = ArrayRealVector.new MathHelper.hypotScalar(other, @s.toArray)\n      end\n    end\n    result\n  end\n\n  def sin\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Sin.new())\n    result\n  end\n\n  def cos\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Cos.new())\n    result\n  end\n\n  def tan\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Tan.new())\n    result\n  end\n\n  def asin\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Asin.new())\n    result\n  end\n\n  def acos\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Acos.new())\n    result\n  end\n\n  def atan\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Atan.new())\n    result\n  end\n\n  def sinh\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Sinh.new())\n    result\n  end\n\n  def cosh\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Cosh.new())\n    result\n  end\n\n  def tanh\n    result = NMatrix.new(:copy)\n    result.shape = @shape\n    result.dim = @dim\n    result.s = @s.copy.mapToSelf(Tanh.new())\n    result\n  end\n\n  def asinh\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Asinh.new())\n    result\n  end\n\n  def acosh\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Acosh.new())\n    result\n  end\n\n  def atanh\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Atanh.new())\n    result\n  end\n\n  def exp\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Exp.new())\n    result\n  end\n\n  def log(val = :natural)\n    result = create_dummy_nmatrix\n    if val == :natural\n      result.s = @s.copy.mapToSelf(Log.new())\n    else\n      result.s = ArrayRealVector.new MathHelper.log(val, @s.toArray)\n    end\n    result\n  
end\n\n  def log2\n    self.log(2)\n  end\n\n  def log10\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Log10.new())\n    result\n  end\n\n  def sqrt\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Sqrt.new())\n    result\n  end\n\n  def erf\n    result = create_dummy_nmatrix\n    result.s = ArrayRealVector.new MathHelper.erf(@s.toArray)\n    result\n  end\n\n  def erfc\n    result = create_dummy_nmatrix\n    result.s = ArrayRealVector.new MathHelper.erfc(@s.toArray)\n    result\n  end\n\n  def cbrt\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapToSelf(Cbrt.new())\n    result\n  end\n\n  def gamma\n    result = create_dummy_nmatrix\n    result.s = ArrayRealVector.new MathHelper.gamma(@s.toArray)\n    result\n  end\n\n  def -@\n    result = create_dummy_nmatrix\n    result.s = @s.copy.mapMultiplyToSelf(-1)\n    result\n  end\n\n  def floor\n    result = create_dummy_nmatrix\n    # Need to be changed later\n    result.dtype = :int64\n    result.s = @s.copy.mapToSelf(Floor.new())\n    result\n  end\n\n  def ceil\n    result = create_dummy_nmatrix\n    # Need to be changed later\n    result.dtype = :int64\n    result.s = @s.copy.mapToSelf(Ceil.new())\n    result\n  end\n\n  def round\n    result = create_dummy_nmatrix\n    # Need to be changed later\n    result.dtype = :int64\n    result.s = ArrayRealVector.new MathHelper.round(@s.toArray)\n    result\n  end\n\nend"
  },
  {
    "path": "lib/nmatrix/jruby/slice.rb",
    "content": "class NMatrix\n\n  def get_slice(dim, args, shape_array)\n    slice = {}\n    slice[:coords]=[]\n    slice[:lengths]=[]\n    slice[:single] = true\n\n    argc = args.length\n\n    t = 0\n    (0...dim).each do |r|\n      v = t == argc ? nil : args[t]\n\n      if(argc - t + r < dim && shape_array[r] ==1)\n        slice[:coords][r]  = 0\n        slice[:lengths][r] = 1\n      elsif v.is_a?(Fixnum)\n        v_ = v.to_i.to_int\n        if (v_ < 0) # checking for negative indexes\n          slice[:coords][r]  = shape_array[r]+v_\n        else\n          slice[:coords][r]  = v_\n        end\n        slice[:lengths][r] = 1\n        t+=1\n      elsif (v.is_a?(Symbol) && v == :*)\n        slice[:coords][r] = 0\n        slice[:lengths][r] = shape_array[r]\n        slice[:single] = false\n        t+=1\n      elsif v.is_a?(Range)\n        begin_ = v.begin\n        end_ = v.end\n        excl = v.exclude_end?\n        slice[:coords][r] = (begin_ < 0) ? shape[r] + begin_ : begin_\n\n        # Exclude last element for a...b range\n        if (end_ < 0)\n          slice[:lengths][r] = shape_array[r] + end_ - slice[:coords][r] + (excl ? 0 : 1)\n        else\n          slice[:lengths][r] = end_ - slice[:coords][r] + (excl ? 
0 : 1)\n        end\n\n        slice[:single] = false\n        t+=1\n      else\n        raise(ArgumentError, \"expected Fixnum or Range for slice component instead of #{v.class}\")\n      end\n\n      if (slice[:coords][r] > shape_array[r] || slice[:coords][r] + slice[:lengths][r] > shape_array[r])\n        raise(RangeError, \"slice is larger than matrix in dimension #{r} (slice component #{t})\")\n      end\n    end\n\n    return slice\n  end\n\n  def get_stride(nmatrix)\n    stride = Array.new()\n    (0...nmatrix.dim).each do |i|\n      stride[i] = 1;\n      (i+1...dim).each do |j|\n        stride[i] *= nmatrix.shape[j]\n      end\n    end\n    stride\n  end\n\n  def xslice(args)\n    result = nil\n\n    if self.dim < args.length\n      raise(ArgumentError,\"wrong number of arguments (#{args} for #{effective_dim(self)})\")\n    else\n      result = Array.new()\n\n      slice = get_slice(@dim, args, @shape)\n      stride = get_stride(self)\n      if slice[:single]\n        if (@dtype == :object)\n          result = @s[dense_storage_get(slice,stride)]\n        else\n          s = @s.toArray().to_a\n          result = @s.getEntry(dense_storage_get(slice,stride))\n        end\n      else\n        result = dense_storage_get(slice,stride)\n      end\n    end\n    return result\n  end\n\n  #its by ref\n  def xslice_ref(args)\n    result = nil\n\n    if self.dim < args.length\n      raise(ArgumentError,\"wrong number of arguments (#{args} for #{effective_dim(self)})\")\n    else\n      result = Array.new()\n\n      slice = get_slice(@dim, args, @shape)\n      stride = get_stride(self)\n      if slice[:single]\n        if (@dtype == :object)\n          result = @s[dense_storage_get(slice,stride)]\n        else\n          result = @s.getEntry(dense_storage_get(slice,stride))\n        end\n      else\n        result = dense_storage_ref(slice,stride)\n      end\n    end\n    return result\n  end\n\n  def dense_storage_get(slice,stride)\n    if slice[:single]\n      return 
dense_storage_pos(slice[:coords],stride)\n    else\n      shape = @shape.dup\n      (0...@dim).each do |i|\n        shape[i] = slice[:lengths][i]\n      end\n      psrc = dense_storage_pos(slice[:coords], stride)\n      src = {}\n      result = NMatrix.new(:copy)\n      result.dim = dim\n      result.dtype = @dtype\n      resultShape= Array.new(dim)\n      (0...dim).each do |i|\n        resultShape[i]  = slice[:lengths][i]\n      end\n      result.shape = resultShape\n      dest = {}\n      src[:stride] = get_stride(self)\n      if (@dtype == :object)\n        src[:elements] = @s\n      else\n        src[:elements] = @s.toArray().to_a\n      end\n      dest[:stride] = get_stride(result)\n      dest[:shape] = resultShape\n      dest[:elements] = []\n      temp = []\n      s = (slice_copy(src, dest, slice[:lengths], 0, psrc,0))\n      # if\n      # arr = Java::double[s.length].new\n      if (@dtype == :object)\n        arr = Java::boolean[s.length].new\n      else\n        arr = Java::double[s.length].new\n      end\n      (0...s.length).each do |i|\n        arr[i] = s[i]\n      end\n      if (@dtype == :object)\n        result.s = arr\n      else\n        result.s = ArrayRealVector.new(arr)\n      end\n\n      return result\n    end\n  end\n\n  def slice_copy(src, dest,lengths, pdest, psrc,n)\n    if @dim-n>1\n      (0...lengths[n]).each do |i|\n        slice_copy(src, dest, lengths,pdest+dest[:stride][n]*i,psrc+src[:stride][n]*i,n+1)\n      end\n    else\n      (0...dest[:shape][n]).each do |p|\n        dest[:elements][p+pdest] = src[:elements][p+psrc]\n      end\n    end\n    dest[:elements]\n  end\n\n  def dense_storage_coords(s, slice_pos, coords_out, stride, offset)  #array, int, array\n    temp_pos = slice_pos;\n\n    (0...dim).each do |i|\n      coords_out[i] = (temp_pos - temp_pos % stride[i])/stride[i] - offset[i];\n      temp_pos = temp_pos % stride[i]\n    end\n\n    return temp_pos\n  end\n\n  def dense_storage_pos(coords,stride)\n    pos = 0;\n    
offset = 0\n    (0...@dim).each do |i|\n      pos += coords[i]  * stride[i] ;\n    end\n    return pos + offset;\n  end\n\n  def slice_set(dest, lengths, pdest, rank, v, v_size, v_offset)\n    if (dim - rank > 1)\n      (0...lengths[rank]).each do |i|\n        slice_set(dest, lengths, pdest + dest[:stride][rank] * i, rank + 1, v, v_size, v_offset);\n      end\n    else\n      (0...lengths[rank]).each do |p|\n        v_offset %= v_size if(v_offset >= v_size)\n        # elem = dest[:elements]\n        # elem[p + pdest] = v[v_offset]\n        if @dtype == :object\n          @s[p + pdest] = v[v_offset]\n        else\n          @s.setEntry(p + pdest, v[v_offset])\n        end\n        v_offset += 1\n      end\n    end\n  end\n\n  def dense_storage_set(slice, right)\n    stride = get_stride(self)\n    v_size = 1\n\n    if right.is_a?(NMatrix)\n      right = right.s.toArray.to_a\n    end\n\n    if(right.is_a?(Array))\n      v_size = right.length\n      v = right\n      if (dtype == :RUBYOBJ)\n        # nm_register_values(reinterpret_cast<VALUE*>(v), v_size)\n      end\n\n      (0...v_size).each do |m|\n        v[m] = right[m]\n      end\n    else\n      v = [right]\n      if (@dtype == :RUBYOBJ)\n        # nm_register_values(reinterpret_cast<VALUE*>(v), v_size)\n      end\n    end\n    if(slice[:single])\n      # reinterpret_cast<D*>(s->elements)[nm_dense_storage_pos(s, slice->coords)] = v;\n      pos = dense_storage_pos(slice[:coords],stride)\n      if @dtype == :object\n        @s[pos] = v[0]\n      else\n        @s.setEntry(pos, v[0])\n      end\n    else\n      v_offset = 0\n      dest = {}\n      dest[:stride] = get_stride(self)\n      dest[:shape] = shape\n      # dest[:elements] = @s.toArray().to_a\n      dense_pos = dense_storage_pos(slice[:coords],stride)\n      slice_set(dest, slice[:lengths], dense_pos, 0, v, v_size, v_offset)\n    end\n  end\n\nend"
  },
  {
    "path": "lib/nmatrix/lapack_core.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == lapack_core.rb\n#\n# This file contains friendlier interfaces to LAPACK functions\n# implemented in C.\n# This file is only for functions available with the core nmatrix gem\n# (no external libraries needed).\n#\n# Note: most of these functions are borrowed from ATLAS, which is available under a BSD-\n# style license.\n#++\n\nclass NMatrix\n\n  module LAPACK\n\n    #Add functions from C extension to main LAPACK module\n    class << self\n      NMatrix::Internal::LAPACK.singleton_methods.each do |m|\n        define_method m, NMatrix::Internal::LAPACK.method(m).to_proc\n      end\n    end\n\n    class << self\n      # Solve the matrix equation AX = B, where A is a symmetric (or Hermitian)\n      # positive-definite matrix. If A is a nxn matrix, B must be mxn.\n      # Depending on the value of uplo, only the upper or lower half of +a+\n      # is read.\n      # This uses the Cholesky decomposition so it should be faster than\n      # the generic NMatrix#solve method.\n      # Doesn't modify inputs.\n      # Requires either the nmatrix-atlas or nmatrix-lapacke gem.\n      # * *Arguments* :\n      #   - +uplo+ -> Either +:upper+ or +:lower+. 
Specifies which half of +a+ to read.\n      #   - +a+ -> The matrix A.\n      #   - +b+ -> The right-hand side B.\n      # * *Returns* :\n      #   - The solution X\n      def posv(uplo, a, b)\n        raise(NotImplementedError, \"Either the nmatrix-atlas or nmatrix-lapacke gem must be installed to use posv\")\n      end\n\n      #     laswp(matrix, ipiv) -> NMatrix\n      #\n      # Permute the columns of a matrix (in-place) according to the Array +ipiv+.\n      #\n      def laswp(matrix, ipiv)\n        raise(ArgumentError, \"expected NMatrix for argument 0\") unless matrix.is_a?(NMatrix)\n        raise(StorageTypeError, \"LAPACK functions only work on :dense NMatrix instances\") unless matrix.stype == :dense\n        raise(ArgumentError, \"expected Array ipiv to have no more entries than NMatrix a has columns\") if ipiv.size > matrix.shape[1]\n\n        clapack_laswp(matrix.shape[0], matrix, matrix.shape[1], 0, ipiv.size-1, ipiv, 1)\n      end\n\n      def alloc_svd_result(matrix)\n        [\n          NMatrix.new(matrix.shape[0], dtype: matrix.dtype),\n          NMatrix.new([[matrix.shape[0],matrix.shape[1]].min,1], dtype: matrix.abs_dtype),\n          NMatrix.new(matrix.shape[1], dtype: matrix.dtype)\n        ]\n      end\n\n\n      #\n      # call-seq:\n      #     gesvd(matrix) -> [u, sigma, v_transpose]\n      #     gesvd(matrix) -> [u, sigma, v_conjugate_transpose] # complex\n      #\n      # Compute the singular value decomposition of a matrix using LAPACK's GESVD function.\n      #\n      # Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK\n      # requires.\n      #\n      # Requires either the nmatrix-lapacke or nmatrix-atlas gem.\n      #\n      def gesvd(matrix, workspace_size=1)\n        raise(NotImplementedError,\"gesvd requires either the nmatrix-atlas or nmatrix-lapacke gem\")\n      end\n\n      #\n      # call-seq:\n      #     gesdd(matrix) -> [u, sigma, v_transpose]\n      #     
gesdd(matrix) -> [u, sigma, v_conjugate_transpose] # complex\n      #\n      # Compute the singular value decomposition of a matrix using LAPACK's GESDD function. This uses a divide-and-conquer\n      # strategy. See also #gesvd.\n      #\n      # Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK\n      # requires.\n      #\n      # Requires either the nmatrix-lapacke or nmatrix-atlas gem.\n      #\n      def gesdd(matrix, workspace_size=nil)\n        raise(NotImplementedError,\"gesdd requires either the nmatrix-atlas or nmatrix-lapacke gem\")\n      end\n\n      #\n      # call-seq:\n      #     geev(matrix) -> [eigenvalues, left_eigenvectors, right_eigenvectors]\n      #     geev(matrix, :left) -> [eigenvalues, left_eigenvectors]\n      #     geev(matrix, :right) -> [eigenvalues, right_eigenvectors]\n      #\n      # Perform eigenvalue decomposition on a matrix using LAPACK's xGEEV function.\n      #\n      # +eigenvalues+ is a n-by-1 NMatrix containing the eigenvalues.\n      #\n      # +right_eigenvalues+ is a n-by-n matrix such that its j'th column\n      # contains the (right) eigenvalue of +matrix+ corresponding\n      # to the j'th eigenvalue.\n      # This means that +matrix+ = RDR^(-1),\n      # where R is +right_eigenvalues+ and D is the diagonal matrix formed\n      # from +eigenvalues+.\n      #\n      # +left_eigenvalues+ is n-by-n and its columns are the left\n      # eigenvalues of +matrix+, using the {definition of left eigenvalue\n      # from LAPACK}[https://software.intel.com/en-us/node/521147].\n      #\n      # For real dtypes, +eigenvalues+ and the eigenvector matrices\n      # will be complex if and only if +matrix+ has complex eigenvalues.\n      #\n      # Only available if nmatrix-lapack or nmatrix-atlas is installed.\n      #\n      def geev(matrix, which=:both)\n        raise(NotImplementedError, \"geev requires either the nmatrix-atlas or nmatrix-lapack gem\")\n      end\n\n    
  # The following are functions that used to be implemented in C, but\n      # now require nmatrix-atlas to run properly, so we can just\n      # implemented their stubs in Ruby.\n      def lapack_gesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, lwork)\n        raise(NotImplementedError,\"lapack_gesvd requires the nmatrix-atlas gem\")\n      end\n\n      def lapack_gesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, lwork)\n        raise(NotImplementedError,\"lapack_gesdd requires the nmatrix-atlas gem\")\n      end\n\n      def lapack_geev(jobvl, jobvr, n, a, lda, w, wi, vl, ldvl, vr, ldvr, lwork)\n        raise(NotImplementedError,\"lapack_geev requires the nmatrix-atlas gem\")\n      end\n\n      def clapack_potrf(order, uplo, n, a, lda)\n        raise(NotImplementedError,\"clapack_potrf requires the nmatrix-atlas gem\")\n      end\n\n      def clapack_potri(order, uplo, n, a, lda)\n        raise(NotImplementedError,\"clapack_potri requires the nmatrix-atlas gem\")\n      end\n\n      def clapack_potrs(order, uplo, n, nrhs, a, lda, b, ldb)\n        raise(NotImplementedError,\"clapack_potrs requires the nmatrix-atlas gem\")\n      end\n\n      def clapack_getri(order, n, a, lda, ipiv)\n        raise(NotImplementedError,\"clapack_getri requires the nmatrix-atlas gem\")\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/lapack_ext_common.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == lapack_ext_common.rb\n#\n# Contains functions shared by nmatrix-atlas and nmatrix-lapacke gems.\n#++\n\nclass NMatrix\n  def NMatrix.register_lapack_extension(name)\n    if (defined? @@lapack_extension)\n      raise \"Attempting to load #{name} when #{@@lapack_extension} is already loaded. You can only load one LAPACK extension.\"\n    end\n\n    @@lapack_extension = name\n  end\n\n  alias_method :internal_dot, :dot\n\n  def dot(right_v)\n    if (right_v.is_a?(NMatrix) && self.stype == :dense && right_v.stype == :dense &&\n        self.dim == 2 && right_v.dim == 2 && self.shape[1] == right_v.shape[0])\n\n      result_dtype = NMatrix.upcast(self.dtype,right_v.dtype)\n      left = self.dtype == result_dtype ? self : self.cast(dtype: result_dtype)\n      right = right_v.dtype == result_dtype ? 
right_v : right_v.cast(dtype: result_dtype)\n\n      left = left.clone if left.is_ref?\n      right = right.clone if right.is_ref?\n\n      result_m = left.shape[0]\n      result_n = right.shape[1]\n      left_n = left.shape[1]\n      vector = result_n == 1\n      result = NMatrix.new([result_m,result_n], dtype: result_dtype)\n\n      if vector\n        NMatrix::BLAS.cblas_gemv(false, result_m, left_n, 1, left, left_n, right, 1, 0, result, 1)\n      else\n        NMatrix::BLAS.cblas_gemm(:row, false, false, result_m, result_n, left_n, 1, left, left_n, right, result_n, 0, result, result_n)\n      end\n      return result\n    else\n      #internal_dot will handle non-dense matrices (and also dot-products for NMatrix's with dim=1),\n      #and also all error-handling if the input is not valid\n      self.internal_dot(right_v)\n    end\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/lapack_plugin.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == lapack_plugin.rb\n#\n# This file `require`s either nmatrix-atlas or nmatrix-lapacke depending on which\n# is available.\n#\n# The idea is that if a developer wants to use a LAPACK feature which is provided\n# by both of these gems (e.g. NMatrix#potrf! or NMatrix::LAPACK.geev),\n# but doesn't care which one is installed, they can\n# just `require 'nmatrix/lapack_plugin'` rather than having to choose between\n# `require 'nmatrix/lapacke'` or `require 'nmatrix/lapacke'` \n#++\n\nbegin\n  require 'nmatrix/atlas'\nrescue LoadError\n  begin\n    require 'nmatrix/lapacke'\n  rescue LoadError\n    raise(LoadError,\"Either nmatrix-atlas or nmatrix-lapacke must be installed\")\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/lapacke.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == lapacke.rb\n#\n# ruby file for the nmatrix-lapacke gem. Loads the C extension and defines\n# nice ruby interfaces for LAPACK functions.\n#++\n\nrequire 'nmatrix/nmatrix.rb' #need to have nmatrix required first or else bad things will happen\nrequire_relative 'lapack_ext_common'\n\nNMatrix.register_lapack_extension(\"nmatrix-lapacke\")\n\nrequire \"nmatrix_lapacke.so\"\n\nclass NMatrix\n  #Add functions from the LAPACKE C extension to the main LAPACK and BLAS modules.\n  #This will overwrite the original functions where applicable.\n  module LAPACK\n    class << self\n      NMatrix::LAPACKE::LAPACK.singleton_methods.each do |m|\n        define_method m, NMatrix::LAPACKE::LAPACK.method(m).to_proc\n      end\n    end\n  end\n\n  module BLAS\n    class << self\n      NMatrix::LAPACKE::BLAS.singleton_methods.each do |m|\n        define_method m, NMatrix::LAPACKE::BLAS.method(m).to_proc\n      end\n    end\n  end\n\n  module LAPACK\n    class << self\n      def posv(uplo, a, b)\n        raise(ShapeError, \"a must be square\") unless a.dim == 2 && a.shape[0] == a.shape[1]\n        raise(ShapeError, \"number of rows of b must equal number of cols of a\") unless a.shape[1] == b.shape[0]\n        raise(StorageTypeError, \"only works with dense matrices\") unless a.stype == :dense && b.stype 
== :dense\n        raise(DataTypeError, \"only works for non-integer, non-object dtypes\") if \n          a.integer_dtype? || a.object_dtype? || b.integer_dtype? || b.object_dtype?\n\n        x     = b.clone\n        clone = a.clone\n        n = a.shape[0]\n        nrhs = b.shape[1]\n        lapacke_potrf(:row, uplo, n, clone, n)\n        lapacke_potrs(:row, uplo, n, nrhs, clone, n, x, b.shape[1])\n        x\n      end\n\n      def geev(matrix, which=:both)\n        raise(StorageTypeError, \"LAPACK functions only work on dense matrices\") unless matrix.dense?\n        raise(ShapeError, \"eigenvalues can only be computed for square matrices\") unless matrix.dim == 2 && matrix.shape[0] == matrix.shape[1]\n\n        jobvl = (which == :both || which == :left) ? :t : false\n        jobvr = (which == :both || which == :right) ? :t : false\n\n        # Copy the matrix so it doesn't get overwritten.\n        temporary_matrix = matrix.clone\n        n = matrix.shape[0]\n\n        # Outputs\n        eigenvalues = NMatrix.new([n, 1], dtype: matrix.dtype) # For real dtypes this holds only the real part of the eigenvalues.\n        imag_eigenvalues = matrix.complex_dtype? ? nil : NMatrix.new([n, 1], dtype: matrix.dtype) # For complex dtypes, this is unused.\n        left_output      = jobvl ? matrix.clone_structure : nil\n        right_output     = jobvr ? matrix.clone_structure : nil\n\n        NMatrix::LAPACK::lapacke_geev(:row,\n                                      jobvl, # compute left eigenvectors of A?\n                                      jobvr, # compute right eigenvectors of A? 
(left eigenvectors of A**T)\n                                      n, # order of the matrix\n                                      temporary_matrix,# input matrix (used as work)\n                                      n, # leading dimension of matrix\n                                      eigenvalues,# real part of computed eigenvalues\n                                      imag_eigenvalues,# imag part of computed eigenvalues\n                                      left_output,     # left eigenvectors, if applicable\n                                      n, # leading dimension of left_output\n                                      right_output,    # right eigenvectors, if applicable\n                                      n) # leading dimension of right_output\n\n\n        # For real dtypes, transform left_output and right_output into correct forms.\n        # If the j'th and the (j+1)'th eigenvalues form a complex conjugate\n        # pair, then the j'th and (j+1)'th columns of the matrix are\n        # the real and imag parts of the eigenvector corresponding\n        # to the j'th eigenvalue.\n        if !matrix.complex_dtype?\n          complex_indices = []\n          n.times do |i|\n            complex_indices << i if imag_eigenvalues[i] != 0.0\n          end\n\n          if !complex_indices.empty?\n            # For real dtypes, put the real and imaginary parts together\n            eigenvalues = eigenvalues + imag_eigenvalues*Complex(0.0,1.0)\n            left_output = left_output.cast(dtype: NMatrix.upcast(:complex64, matrix.dtype)) if left_output\n            right_output = right_output.cast(dtype: NMatrix.upcast(:complex64, matrix.dtype)) if right_output\n          end\n\n          complex_indices.each_slice(2) do |i, _|\n            if right_output\n              right_output[0...n,i] = right_output[0...n,i] + right_output[0...n,i+1]*Complex(0.0,1.0)\n              right_output[0...n,i+1] = right_output[0...n,i].complex_conjugate\n            end\n\n          
  if left_output\n              left_output[0...n,i] = left_output[0...n,i] + left_output[0...n,i+1]*Complex(0.0,1.0)\n              left_output[0...n,i+1] = left_output[0...n,i].complex_conjugate\n            end\n          end\n        end\n\n        if which == :both\n          return [eigenvalues, left_output, right_output]\n        elsif which == :left\n          return [eigenvalues, left_output]\n        else\n          return [eigenvalues, right_output]\n        end\n      end\n\n      def gesvd(matrix, workspace_size=1)\n        result = alloc_svd_result(matrix)\n\n        m = matrix.shape[0]\n        n = matrix.shape[1]\n\n        superb = NMatrix.new([[m,n].min], dtype: matrix.abs_dtype)\n\n        NMatrix::LAPACK::lapacke_gesvd(:row, :a, :a, m, n, matrix, n, result[1], result[0], m, result[2], n, superb)\n        result\n      end\n\n      def gesdd(matrix, workspace_size=nil)\n        result = alloc_svd_result(matrix)\n\n        m = matrix.shape[0]\n        n = matrix.shape[1]\n\n        NMatrix::LAPACK::lapacke_gesdd(:row, :a, m, n, matrix, n, result[1], result[0], m, result[2], n)\n        result\n      end\n    end\n  end\n\n  def getrf!\n    raise(StorageTypeError, \"LAPACK functions only work on dense matrices\") unless self.dense?\n\n    ipiv = NMatrix::LAPACK::lapacke_getrf(:row, self.shape[0], self.shape[1], self, self.shape[1])\n\n    return ipiv\n  end\n\n  def invert!\n    raise(StorageTypeError, \"invert only works on dense matrices currently\") unless self.dense?\n    raise(ShapeError, \"Cannot invert non-square matrix\") unless shape[0] == shape[1]\n    raise(DataTypeError, \"Cannot invert an integer matrix in-place\") if self.integer_dtype?\n\n    # Get the pivot array; factor the matrix\n    n = self.shape[0]\n    pivot = NMatrix::LAPACK::lapacke_getrf(:row, n, n, self, n)\n    # Now calculate the inverse using the pivot array\n    NMatrix::LAPACK::lapacke_getri(:row, n, self, n, pivot)\n\n    self\n  end\n\n  def potrf!(which)\n    
raise(StorageTypeError, \"LAPACK functions only work on dense matrices\") unless self.dense?\n    raise(ShapeError, \"Cholesky decomposition only valid for square matrices\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n\n    NMatrix::LAPACK::lapacke_potrf(:row, which, self.shape[0], self, self.shape[1])\n  end\n\n  def solve(b, opts = {})\n    raise(ShapeError, \"Must be called on square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    raise(ShapeError, \"number of rows of b must equal number of cols of self\") if \n      self.shape[1] != b.shape[0]\n    raise(ArgumentError, \"only works with dense matrices\") if self.stype != :dense\n    raise(ArgumentError, \"only works for non-integer, non-object dtypes\") if \n      integer_dtype? or object_dtype? or b.integer_dtype? or b.object_dtype?\n\n    opts = { form: :general }.merge(opts)\n    x    = b.clone\n    n    = self.shape[0]\n    nrhs = b.shape[1]\n\n    case opts[:form] \n    when :general\n      clone = self.clone\n      ipiv = NMatrix::LAPACK.lapacke_getrf(:row, n, n, clone, n)\n      NMatrix::LAPACK.lapacke_getrs(:row, :no_transpose, n, nrhs, clone, n, ipiv, x, nrhs)\n      x\n    when :upper_tri, :upper_triangular\n      raise(ArgumentError, \"upper triangular solver does not work with complex dtypes\") if\n        complex_dtype? or b.complex_dtype?\n      NMatrix::BLAS::cblas_trsm(:row, :left, :upper, false, :nounit, n, nrhs, 1.0, self, n, x, nrhs)\n      x\n    when :lower_tri, :lower_triangular\n      raise(ArgumentError, \"lower triangular solver does not work with complex dtypes\") if\n        complex_dtype? 
or b.complex_dtype?\n      NMatrix::BLAS::cblas_trsm(:row, :left, :lower, false, :nounit, n, nrhs, 1.0, self, n, x, nrhs)\n      x\n    when :pos_def, :positive_definite\n      u, l = self.factorize_cholesky\n      z = l.solve(b, form: :lower_tri)\n      u.solve(z, form: :upper_tri)\n    else\n      raise(ArgumentError, \"#{opts[:form]} is not a valid form option\")\n    end\n  end\n\n  #\n  # call-seq:\n  #     geqrf! -> shape.min x 1 NMatrix \n  #\n  # QR factorization of a general M-by-N matrix +A+. \n  #\n  # The QR factorization is A = QR, where Q is orthogonal and R is Upper Triangular\n  # +A+ is overwritten with the elements of R and Q with Q being represented by the \n  # elements below A's diagonal and an array of scalar factors in the output NMatrix. \n  #\n  # The matrix Q is represented as a product of elementary reflectors\n  #     Q = H(1) H(2) . . . H(k), where k = min(m,n).\n  #\n  # Each H(i) has the form\n  #\n  #     H(i) = I - tau * v * v'\n  #\n  # http://www.netlib.org/lapack/explore-html/d3/d69/dgeqrf_8f.html\n  # \n  # Only works for dense matrices.\n  #\n  # * *Returns* :\n  #   - Vector TAU. Q and R are stored in A. Q is represented by TAU and A\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #\n  def geqrf!\n    raise(StorageTypeError, \"LAPACK functions only work on dense matrices\") unless self.dense?\n    \n    tau = NMatrix.new([self.shape.min,1], dtype: self.dtype)\n    NMatrix::LAPACK::lapacke_geqrf(:row, self.shape[0], self.shape[1], self, self.shape[1], tau)\n    \n    tau\n  end\n  \n  #\n  # call-seq:\n  #     ormqr(tau) -> NMatrix\n  #     ormqr(tau, side, transpose, c) -> NMatrix\n  #\n  # Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization. \n  # +c+ is overwritten with the elements of the result NMatrix if supplied. 
Q is the orthogonal matrix \n  # represented by tau and the calling NMatrix\n  # \n  # Only works on float types, use unmqr for complex types.\n  #\n  # == Arguments\n  #\n  # * +tau+ - vector containing scalar factors of elementary reflectors\n  # * +side+ - direction of multiplication [:left, :right]\n  # * +transpose+ - apply Q with or without transpose [false, :transpose] \n  # * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity\n  #\n  # * *Returns* :\n  #\n  #   - Q * c or c * Q Where Q may be transposed before multiplication. \n  #    \n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #   - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types\n  #   - +TypeError+ -> c must have the same dtype as the calling NMatrix\n  #\n  def ormqr(tau, side=:left, transpose=false, c=nil)\n    raise(StorageTypeError, \"LAPACK functions only work on dense matrices\") unless self.dense?\n    raise(TypeError, \"Works only on floating point matrices, use unmqr for complex types\") if self.complex_dtype?\n    raise(TypeError, \"c must have the same dtype as the calling NMatrix\") if c and c.dtype != self.dtype\n\n\n    #Default behaviour produces Q * I  = Q if c is not supplied.\n    result = c ? c.clone : NMatrix.identity(self.shape[0], dtype: self.dtype)\n    NMatrix::LAPACK::lapacke_ormqr(:row, side, transpose, result.shape[0], result.shape[1], tau.shape[0], self, self.shape[1], tau, result, result.shape[1])\n    \n    result\n  end\n\n  #\n  # call-seq:\n  #     unmqr(tau) -> NMatrix\n  #     unmqr(tau, side, transpose, c) -> NMatrix\n  #\n  # Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization. \n  # +c+ is overwritten with the elements of the result NMatrix if it is supplied. 
Q is the orthogonal matrix \n  # represented by tau and the calling NMatrix\n  # \n  # Only works on complex types, use ormqr for float types.\n  #\n  # == Arguments\n  #\n  # * +tau+ - vector containing scalar factors of elementary reflectors\n  # * +side+ - direction of multiplication [:left, :right]\n  # * +transpose+ - apply Q as Q or its complex conjugate [false, :complex_conjugate] \n  # * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity\n  #\n  # * *Returns* :\n  #\n  #   - Q * c or c * Q Where Q may be transformed to its complex conjugate before multiplication. \n  #    \n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> LAPACK functions only work on dense matrices.\n  #   - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types\n  #   - +TypeError+ -> c must have the same dtype as the calling NMatrix\n  #\n  def unmqr(tau, side=:left, transpose=false, c=nil)\n    raise(StorageTypeError, \"ATLAS functions only work on dense matrices\") unless self.dense?\n    raise(TypeError, \"Works only on complex matrices, use ormqr for normal floating point matrices\") unless self.complex_dtype?\n    raise(TypeError, \"c must have the same dtype as the calling NMatrix\") if c and c.dtype != self.dtype\n\n    #Default behaviour produces Q * I  = Q if c is not supplied.\n    result = c ? c.clone : NMatrix.identity(self.shape[0], dtype: self.dtype)\n    NMatrix::LAPACK::lapacke_unmqr(:row, side, transpose, result.shape[0], result.shape[1], tau.shape[0], self, self.shape[1], tau, result, result.shape[1])\n    \n    result\n  end\n\n\nend\n"
  },
  {
    "path": "lib/nmatrix/math.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == math.rb\n#\n# Math functionality for NMatrix, along with any NMatrix instance\n# methods that correspond to ATLAS/BLAS/LAPACK functions (e.g.,\n# laswp).\n#++\n\nclass NMatrix\n\n  module NMMath #:nodoc:\n    METHODS_ARITY_2 = [:atan2, :ldexp, :hypot]\n    METHODS_ARITY_1 = [:cos, :sin, :tan, :acos, :asin, :atan, :cosh, :sinh, :tanh, :acosh,\n      :asinh, :atanh, :exp, :log2, :log10, :sqrt, :cbrt, :erf, :erfc, :gamma, :-@]\n  end\n\n  # Methods for generating permutation matrix from LU factorization results.\n  module FactorizeLUMethods\n    class << self\n      def permutation_matrix_from(pivot_array)\n        perm_arry = permutation_array_for(pivot_array)\n        n         = NMatrix.zeros(perm_arry.size, dtype: :byte)\n\n        perm_arry.each_with_index { |e, i| n[e,i] = 1 }\n\n        n\n      end\n\n      def permutation_array_for(pivot_array)\n        perm_arry = Array.new(pivot_array.size) { |i| i }\n        perm_arry.each_index do |i|\n          #the pivot indices returned by LAPACK getrf are indexed starting\n          #from 1, so we need to subtract 1 here\n          perm_arry[i], perm_arry[pivot_array[i]-1] = perm_arry[pivot_array[i]-1], perm_arry[i]\n        end\n\n        perm_arry\n      end\n    end\n  end\n\n  #\n  # call-seq:\n  #     invert! 
-> NMatrix\n  #\n  # Use LAPACK to calculate the inverse of the matrix (in-place) if available.\n  # Only works on dense matrices. Alternatively uses in-place Gauss-Jordan\n  # elimination.\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented on dense matrices.\n  #   - +ShapeError+ -> matrix must be square.\n  #   - +DataTypeError+ -> cannot invert an integer matrix in-place.\n  #\n  def invert!\n    raise(StorageTypeError, \"invert only works on dense matrices currently\") unless self.dense?\n    raise(ShapeError, \"Cannot invert non-square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    raise(DataTypeError, \"Cannot invert an integer matrix in-place\") if self.integer_dtype?\n\n    #No internal implementation of getri, so use this other function\n    __inverse__(self, true)\n  end\n\n  #\n  # call-seq:\n  #     invert -> NMatrix\n  #\n  # Make a copy of the matrix, then invert using Gauss-Jordan elimination.\n  # Works without LAPACK.\n  #\n  # * *Returns* :\n  #   - A dense NMatrix. Will be the same type as the input NMatrix,\n  #   except if the input is an integral dtype, in which case it will be a\n  #   :float64 NMatrix.\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented on dense matrices.\n  #   - +ShapeError+ -> matrix must be square.\n  #\n  def invert\n    #write this in terms of invert! so plugins will only have to overwrite\n    #invert! and not invert\n    if self.integer_dtype?\n      cloned = self.cast(dtype: :float64)\n      cloned.invert!\n    else\n      cloned = self.clone\n      cloned.invert!\n    end\n  end\n  alias :inverse :invert\n\n  # call-seq:\n  #     exact_inverse! 
-> NMatrix\n  #\n  # Calculates the exact inverse of a matrix of size 2 or 3.\n  # Only works on dense matrices.\n  #\n  # * *Raises* :\n  #   - +DataTypeError+ -> cannot invert an integer matrix in-place.\n  #   - +NotImplementedError+ -> cannot find exact inverse of matrix with size greater than 3\n  #\n  def exact_inverse!\n    raise(ShapeError, \"Cannot invert non-square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    raise(DataTypeError, \"Cannot invert an integer matrix in-place\") if self.integer_dtype?\n    #No internal implementation of getri, so use this other function\n    n = self.shape[0]\n    if n>3\n      raise(NotImplementedError, \"Cannot find exact inverse of matrix of size greater than 3\")\n    else\n      clond=self.clone\n      __inverse_exact__(clond, n, n)\n    end\n  end\n\n  #\n  # call-seq:\n  #     exact_inverse -> NMatrix\n  #\n  # Make a copy of the matrix, then invert using exact_inverse\n  #\n  # * *Returns* :\n  #   - A dense NMatrix. Will be the same type as the input NMatrix,\n  #   except if the input is an integral dtype, in which case it will be a\n  #   :float64 NMatrix.\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented on dense matrices.\n  #   - +ShapeError+ -> matrix must be square.\n  #   - +NotImplementedError+ -> cannot find exact inverse of matrix with size greater than 3\n  #\n  def exact_inverse\n    #write this in terms of exact_inverse! so plugins will only have to overwrite\n    #exact_inverse! 
and not exact_inverse\n    if self.integer_dtype?\n      cloned = self.cast(dtype: :float64)\n      cloned.exact_inverse!\n    else\n      cloned = self.clone\n      cloned.exact_inverse!\n    end\n  end\n  alias :invert_exactly :exact_inverse\n\n\n\n  #\n  # call-seq:\n  #     pinv -> NMatrix\n  #\n  # Compute the Moore-Penrose pseudo-inverse of a matrix using its\n  # singular value decomposition (SVD).\n  #\n  # This function requires the nmatrix-atlas gem installed.\n  #\n  # * *Arguments* :\n  #  - +tolerance(optional)+ -> Cutoff for small singular values.\n  #\n  # * *Returns* :\n  #   -  Pseudo-inverse matrix.\n  #\n  # * *Raises* :\n  #   - +NotImplementedError+ -> If called without nmatrix-atlas or nmatrix-lapacke gem.\n  #   - +TypeError+ -> If called without float or complex data type.\n  #\n  # * *Examples* :\n  #\n  #  a = NMatrix.new([2,2],[1,2,\n  #                         3,4], dtype: :float64)\n  #  a.pinv # => [ [-2.0000000000000018, 1.0000000000000007]\n  #                [1.5000000000000016, -0.5000000000000008] ]\n  #\n  #  b = NMatrix.new([4,1],[1,2,3,4], dtype: :float64)\n  #  b.pinv # => [ [ 0.03333333, 0.06666667, 0.99999999, 0.13333333] ]\n  #\n  # == References\n  #\n  # * https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse\n  # * G. 
Strang, Linear Algebra and Its Applications, 2nd Ed., Orlando, FL, Academic Press\n  #\n  def pinv(tolerance = 1e-15)\n    raise DataTypeError, \"pinv works only with matrices of float or complex data type\" unless\n      [:float32, :float64, :complex64, :complex128].include?(dtype)\n    if self.complex_dtype?\n      u, s, vt = self.complex_conjugate.gesvd # singular value decomposition\n    else\n      u, s, vt = self.gesvd\n    end\n    rows = self.shape[0]\n    cols = self.shape[1]\n    if rows < cols\n      u_reduced = u\n      vt_reduced = vt[0..rows - 1, 0..cols - 1].transpose\n    else\n      u_reduced = u[0..rows - 1, 0..cols - 1]\n      vt_reduced = vt.transpose\n    end\n    largest_singular_value = s.max.to_f\n    cutoff = tolerance * largest_singular_value\n    (0...[rows, cols].min).each do |i|\n      s[i] = 1 / s[i] if s[i] > cutoff\n      s[i] = 0        if s[i] <= cutoff\n    end\n    multiplier = u_reduced.dot(NMatrix.diagonal(s.to_a)).transpose\n    vt_reduced.dot(multiplier)\n  end\n  alias :pseudo_inverse :pinv\n  alias :pseudoinverse :pinv\n\n\n  #\n  # call-seq:\n  #     adjugate! -> NMatrix\n  #\n  # Calculate the adjugate of the matrix (in-place).\n  # Only works on dense matrices.\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented on dense matrices.\n  #   - +ShapeError+ -> matrix must be square.\n  #   - +DataTypeError+ -> cannot calculate adjugate of an integer matrix in-place.\n  #\n  def adjugate!\n    raise(StorageTypeError, \"adjugate only works on dense matrices currently\") unless self.dense?\n    raise(ShapeError, \"Cannot calculate adjugate of a non-square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    raise(DataTypeError, \"Cannot calculate adjugate of an integer matrix in-place\") if self.integer_dtype?\n    d = self.det\n    self.invert!\n    self.map! { |e| e * d }\n    self\n  end\n  alias :adjoint! 
:adjugate!\n\n  #\n  # call-seq:\n  #     adjugate -> NMatrix\n  #\n  # Make a copy of the matrix and calculate the adjugate of the matrix.\n  # Only works on dense matrices.\n  #\n  # * *Returns* :\n  #   - A dense NMatrix. Will be the same type as the input NMatrix,\n  #   except if the input is an integral dtype, in which case it will be a\n  #   :float64 NMatrix.\n  #\n  # * *Raises* :\n  #   - +StorageTypeError+ -> only implemented on dense matrices.\n  #   - +ShapeError+ -> matrix must be square.\n  #\n  def adjugate\n    raise(StorageTypeError, \"adjugate only works on dense matrices currently\") unless self.dense?\n    raise(ShapeError, \"Cannot calculate adjugate of a non-square matrix\") unless self.dim == 2 && self.shape[0] == self.shape[1]\n    d = self.det\n    mat = self.invert\n    mat.map! { |e| e * d }\n    mat\n  end\n  alias :adjoint :adjugate\n\n  # Reduce self to upper hessenberg form using householder transforms.\n  #\n  # == References\n  #\n  # * http://en.wikipedia.org/wiki/Hessenberg_matrix\n  # * http://www.mymathlib.com/c_source/matrices/eigen/hessenberg_orthog.c\n  def hessenberg\n    clone.hessenberg!\n  end\n\n  # Destructive version of #hessenberg\n  def hessenberg!\n    raise ShapeError, \"Trying to reduce non 2D matrix to hessenberg form\" if\n      shape.size != 2\n    raise ShapeError, \"Trying to reduce non-square matrix to hessenberg form\" if\n      shape[0] != shape[1]\n    raise StorageTypeError, \"Matrix must be dense\" if stype != :dense\n    raise TypeError, \"Works with float matrices only\" unless\n      [:float64,:float32].include?(dtype)\n\n    __hessenberg__(self)\n    self\n  end\n\n\n  # call-seq:\n  #   matrix_norm -> Numeric\n  #\n  #  Calculates the selected norm (defaults to 2-norm) of a 2D matrix.\n  #\n  #  This should be used for small or medium sized matrices.\n  #  For greater matrices, there should be a separate implementation where\n  #  the norm is estimated rather than computed, for the sake of 
computation speed.\n  #\n  #  Currently implemented norms are 1-norm, 2-norm, Frobenius, Infinity.\n  #  A minus on the 1, 2 and inf norms returns the minimum instead of the maximum value.\n  #\n  #  Tested mainly with dense matrices. Further checks and modifications might\n  #  be necessary for sparse matrices.\n  #\n  # * *Returns* :\n  # - The selected norm of the matrix.\n  # * *Raises* :\n  # - +NotImplementedError+ -> norm can be calculated only for 2D matrices\n  # - +ArgumentError+ -> unrecognized norm\n  #\n  def matrix_norm type = 2\n    raise(NotImplementedError, \"norm can be calculated only for 2D matrices\") unless self.dim == 2\n    raise(NotImplementedError, \"norm only implemented for dense storage\") unless self.stype == :dense\n    raise(ArgumentError, \"norm not defined for byte dtype\")if self.dtype == :byte\n    case type\n    when nil, 2, -2\n      return self.two_matrix_norm (type == -2)\n    when 1, -1\n      return self.one_matrix_norm (type == -1)\n    when :frobenius, :fro\n      return self.fro_matrix_norm\n    when :infinity, :inf, :'-inf', :'-infinity'\n      return self.inf_matrix_norm  (type == :'-inf' || type == :'-infinity')\n    else\n      raise ArgumentError.new(\"argument must be a valid integer or symbol\")\n    end\n  end\n\n  # Calculate the variance co-variance matrix\n  #\n  # == Options\n  #\n  # * +:for_sample_data+ - Default true. If set to false will consider the denominator for\n  #   population data (i.e. N, as opposed to N-1 for sample data).\n  #\n  # == References\n  #\n  # * http://stattrek.com/matrix-algebra/covariance-matrix.aspx\n  def cov(opts={})\n    raise TypeError, \"Only works for non-integer dtypes\" if integer_dtype?\n     opts = {\n      for_sample_data: true\n    }.merge(opts)\n\n    denominator      = opts[:for_sample_data] ? 
rows - 1 : rows\n    ones             = NMatrix.ones [rows,1]\n    deviation_scores = self - ones.dot(ones.transpose).dot(self) / rows\n    deviation_scores.transpose.dot(deviation_scores) / denominator\n  end\n\n  # Calculate the correlation matrix.\n  def corr\n    raise NotImplementedError, \"Does not work for complex dtypes\" if complex_dtype?\n    standard_deviation = std\n    cov / (standard_deviation.transpose.dot(standard_deviation))\n  end\n\n  # Raise a square matrix to a power. Be careful of numeric overflows!\n  # In case *n* is 0, an identity matrix of the same dimension is returned. In case\n  # of negative *n*, the matrix is inverted and the absolute value of *n* taken\n  # for computing the power.\n  #\n  # == Arguments\n  #\n  # * +n+ - Integer to which self is to be raised.\n  #\n  # == References\n  #\n  # * R.G Dromey - How to Solve it by Computer. Link -\n  #     http://www.amazon.com/Solve-Computer-Prentice-Hall-International-Science/dp/0134340019/ref=sr_1_1?ie=UTF8&qid=1422605572&sr=8-1&keywords=how+to+solve+it+by+computer\n  def pow n\n    raise ShapeError, \"Only works with 2D square matrices.\" if\n      shape[0] != shape[1] or shape.size != 2\n    raise TypeError, \"Only works with integer powers\" unless n.is_a?(Integer)\n\n    sequence = (integer_dtype? ? 
self.cast(dtype: :int64) : self).clone\n    product  = NMatrix.eye shape[0], dtype: sequence.dtype, stype: sequence.stype\n\n    if n == 0\n      return NMatrix.eye(shape, dtype: dtype, stype: stype)\n    elsif n == 1\n      return sequence\n    elsif n < 0\n      n = n.abs\n      sequence.invert!\n      product = NMatrix.eye shape[0], dtype: sequence.dtype, stype: sequence.stype\n    end\n\n    # Decompose n to reduce the number of multiplications.\n    while n > 0\n      product = product.dot(sequence) if n % 2 == 1\n      n = n / 2\n      sequence = sequence.dot(sequence)\n    end\n\n    product\n  end\n\n  # Compute the Kronecker product of +self+ and other NMatrix\n  #\n  # === Arguments\n  #\n  #   * +mat+ - A 2D NMatrix object\n  #\n  # === Usage\n  #\n  #  a = NMatrix.new([2,2],[1,2,\n  #                         3,4])\n  #  b = NMatrix.new([2,3],[1,1,1,\n  #                         1,1,1], dtype: :float64)\n  #  a.kron_prod(b) # => [ [1.0, 1.0, 1.0, 2.0, 2.0, 2.0]\n  #                        [1.0, 1.0, 1.0, 2.0, 2.0, 2.0]\n  #                        [3.0, 3.0, 3.0, 4.0, 4.0, 4.0]\n  #                        [3.0, 3.0, 3.0, 4.0, 4.0, 4.0] ]\n  #\n  def kron_prod(mat)\n    unless self.dimensions==2 and mat.dimensions==2\n      raise ShapeError, \"Implemented for 2D NMatrix objects only.\"\n    end\n\n    # compute the shape [n,m] of the product matrix\n    n, m = self.shape[0]*mat.shape[0], self.shape[1]*mat.shape[1]\n    # compute the entries of the product matrix\n    kron_prod_array = []\n    if self.yale?\n      # +:yale+ requires to get the row by copy in order to apply +#transpose+ to it\n      self.each_row(getby=:copy) do |selfr|\n        mat.each_row do |matr|\n          kron_prod_array += (selfr.transpose.dot matr).to_flat_a\n        end\n      end\n    else\n      self.each_row do |selfr|\n        mat.each_row do |matr|\n          kron_prod_array += (selfr.transpose.dot matr).to_flat_a\n        end\n      end\n    end\n\n    NMatrix.new([n,m], 
kron_prod_array)\n  end\n\n  #\n  # call-seq:\n  #     trace -> Numeric\n  #\n  # Calculates the trace of an nxn matrix.\n  #\n  # * *Raises* :\n  #   - +ShapeError+ -> Expected square matrix\n  #\n  # * *Returns* :\n  #   - The trace of the matrix (a numeric value)\n  #\n  def trace\n    raise(ShapeError, \"Expected square matrix\") unless self.shape[0] == self.shape[1] && self.dim == 2\n\n    (0...self.shape[0]).inject(0) do |total,i|\n      total + self[i,i]\n    end\n  end\n\n  ##\n  # call-seq:\n  #   mean() -> NMatrix\n  #   mean(dimen) -> NMatrix\n  #\n  # Calculates the mean along the specified dimension.\n  #\n  # This will force integer types to float64 dtype.\n  #\n  # @see #inject_rank\n  #\n  def mean(dimen=0)\n    reduce_dtype = nil\n    if integer_dtype? then\n      reduce_dtype = :float64\n    end\n    inject_rank(dimen, 0.0, reduce_dtype) do |mean, sub_mat|\n      mean + sub_mat\n    end / shape[dimen]\n  end\n\n  ##\n  # call-seq:\n  #   sum() -> NMatrix\n  #   cumsum() -> NMatrix\n  #   sum(dimen) -> NMatrix\n  #   cumsum(dimen) -> NMatrix\n  #\n  # Calculates the sum along the specified dimension.\n  #\n  # @see #inject_rank\n  def sum(dimen=0)\n    inject_rank(dimen, 0.0) do |sum, sub_mat|\n      sum + sub_mat\n    end\n  end\n  alias :cumsum :sum\n\n  ##\n  # call-seq:\n  #   min() -> NMatrix\n  #   min(dimen) -> NMatrix\n  #\n  # Calculates the minimum along the specified dimension.\n  #\n  # @see #inject_rank\n  #\n  def min(dimen=0)\n    inject_rank(dimen) do |min, sub_mat|\n      if min.is_a? NMatrix then\n        min * (min <= sub_mat).cast(self.stype, self.dtype) + ((min)*0.0 + (min > sub_mat).cast(self.stype, self.dtype)) * sub_mat\n      else\n        min <= sub_mat ? 
min : sub_mat\n      end\n    end\n  end\n\n  ##\n  # call-seq:\n  #   max() -> NMatrix\n  #   max(dimen) -> NMatrix\n  #\n  # Calculates the maximum along the specified dimension.\n  #\n  # @see #inject_rank\n  #\n  def max(dimen=0)\n    inject_rank(dimen) do |max, sub_mat|\n      if max.is_a? NMatrix then\n        max * (max >= sub_mat).cast(self.stype, self.dtype) + ((max)*0.0 + (max < sub_mat).cast(self.stype, self.dtype)) * sub_mat\n      else\n        max >= sub_mat ? max : sub_mat\n      end\n    end\n  end\n\n\n  ##\n  # call-seq:\n  #   variance() -> NMatrix\n  #   variance(dimen) -> NMatrix\n  #\n  # Calculates the sample variance along the specified dimension.\n  #\n  # This will force integer types to float64 dtype.\n  #\n  # @see #inject_rank\n  #\n  def variance(dimen=0)\n    reduce_dtype = nil\n    if integer_dtype? then\n      reduce_dtype = :float64\n    end\n    m = mean(dimen)\n    inject_rank(dimen, 0.0, reduce_dtype) do |var, sub_mat|\n      var + (m - sub_mat)*(m - sub_mat)/(shape[dimen]-1)\n    end\n  end\n\n  ##\n  # call-seq:\n  #   std() -> NMatrix\n  #   std(dimen) -> NMatrix\n  #\n  #\n  # Calculates the sample standard deviation along the specified dimension.\n  #\n  # This will force integer types to float64 dtype.\n  #\n  # @see #inject_rank\n  #\n  def std(dimen=0)\n    variance(dimen).sqrt\n  end\n\n\n  #\n  # call-seq:\n  #     abs_dtype -> Symbol\n  #\n  # Returns the dtype of the result of a call to #abs. 
In most cases, this is the same as dtype; it should only differ\n  # for :complex64 (where it's :float32) and :complex128 (:float64).\n  def abs_dtype\n    if self.dtype == :complex64\n      :float32\n    elsif self.dtype == :complex128\n      :float64\n    else\n      self.dtype\n    end\n  end\n\n\n  #\n  # call-seq:\n  #     abs -> NMatrix\n  #\n  # Maps all values in a matrix to their absolute values.\n  def abs\n    if stype == :dense\n      self.__dense_map__ { |v| v.abs }\n    elsif stype == :list\n      # FIXME: Need __list_map_stored__, but this will do for now.\n      self.__list_map_merged_stored__(nil, nil) { |v,dummy| v.abs }\n    else\n      self.__yale_map_stored__ { |v| v.abs }\n    end.cast(self.stype, abs_dtype)\n  end\n\n  # Norm calculation methods\n  # Frobenius norm: the Euclidean norm of the matrix, treated as if it were a vector\n  def fro_matrix_norm\n    #float64 has to be used in any case, since nrm2 will not yield correct result for float32\n    self_cast = self.cast(:dtype => :float64)\n\n    column_vector = self_cast.reshape([self.size, 1])\n\n    return column_vector.nrm2\n  end\n\n  # 2-norm: the largest/smallest singular value of the matrix\n  def two_matrix_norm minus = false\n\n    self_cast = self.cast(:dtype => :float64)\n\n    #TODO: confirm if this is the desired svd calculation\n    svd = self_cast.gesvd\n    return svd[1][0, 0] unless minus\n    return svd[1][svd[1].rows-1, svd[1].cols-1]\n  end\n\n  # 1-norm: the maximum/minimum absolute column sum of the matrix\n  def one_matrix_norm minus = false\n    #TODO: change traversing method for sparse matrices\n    number_of_columns = self.cols\n    col_sums = []\n\n    number_of_columns.times do |i|\n      col_sums << self.col(i).inject(0) { |sum, number| sum += number.abs}\n    end\n\n    return col_sums.max unless minus\n    return col_sums.min\n  end\n\n  # Infinity norm: the maximum/minimum absolute row sum of the matrix\n  def inf_matrix_norm minus = false\n    
number_of_rows = self.rows\n    row_sums = []\n\n    number_of_rows.times do |i|\n      row_sums << self.row(i).inject(0) { |sum, number| sum += number.abs}\n    end\n\n    return row_sums.max unless minus\n    return row_sums.min\n  end\n\n  #\n  # call-seq:\n  #     positive_definite? -> boolean\n  #\n  # A matrix is positive definite if it’s symmetric and all its eigenvalues are positive\n  #\n  # * *Returns* :\n  #   - A boolean value telling if the NMatrix is positive definite or not.\n  # * *Raises* :\n  #   - +ShapeError+ -> Must be used on square matrices.\n  #\n  def positive_definite?\n    raise(ShapeError, \"positive definite calculated only for square matrices\") unless\n      self.dim == 2 && self.shape[0] == self.shape[1]\n    cond = 0\n    while cond != self.cols\n      if self[0..cond, 0..cond].det <= 0\n        return false\n      end\n      cond += 1\n    end\n    true\n  end\n\n  #\n  # call-seq:\n  #   svd_rank() -> int\n  #   svd_rank(tolerence) ->int\n  # Gives rank of the matrix based on the singular value decomposition.\n  # The rank of a matrix  is computed as the number of diagonal elements in Sigma that are larger than a tolerance\n  #\n  #* *Returns* :\n  # - An integer equal to the rank of the matrix\n  #* *Raises* :\n  #  - +ShapeError+ -> Is only computable on 2-D matrices\n  #\n  def svd_rank(tolerence=\"default\")\n    raise(ShapeError, \"rank calculated only for 2-D matrices\") unless\n      self.dim == 2 \n\n    sigmas = self.gesvd[1].to_a.flatten\n    eps = NMatrix::FLOAT64_EPSILON\n\n    # epsilon depends on the width of the number\n    if (self.dtype == :float32 || self.dtype == :complex64) \n      eps = NMatrix::FLOAT32_EPSILON\n    end\n    case tolerence\n      when \"default\"\n        tolerence = self.shape.max * sigmas.max * eps # tolerence of a Matrix A is max(size(A))*eps(norm(A)). norm(A) is nearly equal to max(sigma of A)\n    end\n    return sigmas.map { |x| x > tolerence ? 
1 : 0 }.reduce(:+)\n  end\n\n\n\nprotected\n  # Define the element-wise operations for lists. Note that the __list_map_merged_stored__ iterator returns a Ruby Object\n  # matrix, which we then cast back to the appropriate type. If you don't want that, you can redefine these functions in\n  # your own code.\n  {add: :+, sub: :-, mul: :*, div: :/, pow: :**, mod: :%}.each_pair do |ewop, op|\n    define_method(\"__list_elementwise_#{ewop}__\") do |rhs|\n      self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, rhs.dtype))\n    end\n    define_method(\"__dense_elementwise_#{ewop}__\") do |rhs|\n      self.__dense_map_pair__(rhs) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, rhs.dtype))\n    end\n    define_method(\"__yale_elementwise_#{ewop}__\") do |rhs|\n      self.__yale_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, rhs.dtype))\n    end\n    define_method(\"__list_scalar_#{ewop}__\") do |rhs|\n      self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, NMatrix.min_dtype(rhs)))\n    end\n    define_method(\"__yale_scalar_#{ewop}__\") do |rhs|\n      self.__yale_map_stored__ { |l| l.send(op,rhs) }.cast(stype, NMatrix.upcast(dtype, NMatrix.min_dtype(rhs)))\n    end\n    define_method(\"__dense_scalar_#{ewop}__\") do |rhs|\n      self.__dense_map__ { |l| l.send(op,rhs) }.cast(stype, NMatrix.upcast(dtype, NMatrix.min_dtype(rhs)))\n    end\n  end\n\n  # These don't actually take an argument -- they're called reverse-polish style on the matrix.\n  # This group always gets casted to float64.\n  [:log, :log2, :log10, :sqrt, :sin, :cos, :tan, :acos, :asin, :atan, :cosh, :sinh, :tanh, :acosh,\n   :asinh, :atanh, :exp, :erf, :erfc, :gamma, :cbrt, :round].each do |ewop|\n    define_method(\"__list_unary_#{ewop}__\") do\n      self.__list_map_stored__(nil) { |l| Math.send(ewop, l) }.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n   
 define_method(\"__yale_unary_#{ewop}__\") do\n      self.__yale_map_stored__ { |l| Math.send(ewop, l) }.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n    define_method(\"__dense_unary_#{ewop}__\") do\n      self.__dense_map__ { |l| Math.send(ewop, l) }.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n  end\n\n  #:stopdoc:\n  # log takes an optional single argument, the base. Default to natural log.\n  def __list_unary_log__(base)\n    self.__list_map_stored__(nil) { |l| Math.log(l, base) }.cast(stype, NMatrix.upcast(dtype, :float64))\n  end\n\n  def __yale_unary_log__(base)\n    self.__yale_map_stored__ { |l| Math.log(l, base) }.cast(stype, NMatrix.upcast(dtype, :float64))\n  end\n\n  def __dense_unary_log__(base)\n    self.__dense_map__ { |l| Math.log(l, base) }.cast(stype, NMatrix.upcast(dtype, :float64))\n  end\n\n  # These are for negating matrix contents using -@\n  def __list_unary_negate__\n    self.__list_map_stored__(nil) { |l| -l }.cast(stype, dtype)\n  end\n\n  def __yale_unary_negate__\n    self.__yale_map_stored__ { |l| -l }.cast(stype, dtype)\n  end\n\n  def __dense_unary_negate__\n    self.__dense_map__ { |l| -l }.cast(stype, dtype)\n  end\n  #:startdoc:\n\n  # These are for rounding each value of a matrix. 
Takes an optional argument\n  def __list_unary_round__(precision)\n    if self.complex_dtype?\n      self.__list_map_stored__(nil) { |l| Complex(l.real.round(precision), l.imag.round(precision)) }\n                                    .cast(stype, dtype)\n    else\n      self.__list_map_stored__(nil) { |l| l.round(precision) }.cast(stype, dtype)\n    end\n  end\n\n  def __yale_unary_round__(precision)\n    if self.complex_dtype?\n      self.__yale_map_stored__ { |l| Complex(l.real.round(precision), l.imag.round(precision)) }\n                                    .cast(stype, dtype)\n    else\n      self.__yale_map_stored__ { |l| l.round(precision) }.cast(stype, dtype)\n    end\n  end\n\n  def __dense_unary_round__(precision)\n    if self.complex_dtype?\n      self.__dense_map__ { |l| Complex(l.real.round(precision), l.imag.round(precision)) }\n                                    .cast(stype, dtype)\n    else\n      self.__dense_map__ { |l| l.round(precision) }.cast(stype, dtype)\n    end\n  end\n\n  # These are for calculating the floor or ceil of matrix\n  def dtype_for_floor_or_ceil\n    if self.integer_dtype? 
or [:complex64, :complex128, :object].include?(self.dtype)\n      return_dtype = dtype\n    elsif [:float32, :float64].include?(self.dtype)\n      return_dtype = :int64\n    end\n\n    return_dtype\n  end\n\n  [:floor, :ceil].each do |meth|\n    define_method(\"__list_unary_#{meth}__\") do\n      return_dtype = dtype_for_floor_or_ceil\n\n      if [:complex64, :complex128].include?(self.dtype)\n        self.__list_map_stored__(nil) { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)\n      else\n        self.__list_map_stored__(nil) { |l| l.send(meth) }.cast(stype, return_dtype)\n      end\n    end\n\n    define_method(\"__yale_unary_#{meth}__\") do\n      return_dtype = dtype_for_floor_or_ceil\n\n      if [:complex64, :complex128].include?(self.dtype)\n        self.__yale_map_stored__ { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)\n      else\n        self.__yale_map_stored__ { |l| l.send(meth) }.cast(stype, return_dtype)\n      end\n    end\n\n    define_method(\"__dense_unary_#{meth}__\") do\n      return_dtype = dtype_for_floor_or_ceil\n\n      if [:complex64, :complex128].include?(self.dtype)\n        self.__dense_map__ { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)\n      else\n        self.__dense_map__ { |l| l.send(meth) }.cast(stype, return_dtype)\n      end\n    end\n  end\n\n  # These take two arguments. 
One might be a matrix, and one might be a scalar.\n  # See also monkeys.rb, which contains Math module patches to let the first\n  # arg be a scalar\n  [:atan2, :ldexp, :hypot].each do |ewop|\n    define_method(\"__list_elementwise_#{ewop}__\") do |rhs,order|\n      if order then\n        self.__list_map_merged_stored__(rhs, nil) { |r,l| Math.send(ewop,l,r) }\n      else\n        self.__list_map_merged_stored__(rhs, nil) { |l,r| Math.send(ewop,l,r) }\n      end.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n\n    define_method(\"__dense_elementwise_#{ewop}__\") do |rhs, order|\n      if order then\n        self.__dense_map_pair__(rhs) { |r,l| Math.send(ewop,l,r) }\n      else\n        self.__dense_map_pair__(rhs) { |l,r| Math.send(ewop,l,r) }\n      end.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n\n    define_method(\"__yale_elementwise_#{ewop}__\") do |rhs, order|\n      if order then\n        self.__yale_map_merged_stored__(rhs, nil) { |r,l| Math.send(ewop,l,r) }\n      else\n        self.__yale_map_merged_stored__(rhs, nil) { |l,r| Math.send(ewop,l,r) }\n      end.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n\n    define_method(\"__list_scalar_#{ewop}__\") do |rhs,order|\n      if order then\n        self.__list_map_stored__(nil) { |l| Math.send(ewop, rhs, l) }\n      else\n        self.__list_map_stored__(nil) { |l| Math.send(ewop, l, rhs) }\n      end.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n\n    define_method(\"__yale_scalar_#{ewop}__\") do |rhs,order|\n      if order then\n        self.__yale_map_stored__ { |l| Math.send(ewop, rhs, l) }\n      else\n        self.__yale_map_stored__ { |l| Math.send(ewop, l, rhs) }\n      end.cast(stype, NMatrix.upcast(dtype, :float64))\n    end\n\n    define_method(\"__dense_scalar_#{ewop}__\") do |rhs,order|\n      if order\n        self.__dense_map__ { |l| Math.send(ewop, rhs, l) }\n      else\n        self.__dense_map__ { |l| Math.send(ewop, l, rhs) }\n      end.cast(stype, 
NMatrix.upcast(dtype, :float64))\n    end\n  end\n\n  # Equality operators do not involve a cast. We want to get back matrices of TrueClass and FalseClass.\n  {eqeq: :==, neq: :!=, lt: :<, gt: :>, leq: :<=, geq: :>=}.each_pair do |ewop, op|\n    define_method(\"__list_elementwise_#{ewop}__\") do |rhs|\n      self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }\n    end\n    define_method(\"__dense_elementwise_#{ewop}__\") do |rhs|\n      self.__dense_map_pair__(rhs) { |l,r| l.send(op,r) }\n    end\n    define_method(\"__yale_elementwise_#{ewop}__\") do |rhs|\n      self.__yale_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }\n    end\n\n    define_method(\"__list_scalar_#{ewop}__\") do |rhs|\n      self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }\n    end\n    define_method(\"__yale_scalar_#{ewop}__\") do |rhs|\n      self.__yale_map_stored__ { |l| l.send(op,rhs) }\n    end\n    define_method(\"__dense_scalar_#{ewop}__\") do |rhs|\n      self.__dense_map__ { |l| l.send(op,rhs) }\n    end\n  end\nend\n\nif jruby?\n  require_relative \"./jruby/math.rb\"\nelse\n  require_relative \"./cruby/math.rb\"\nend\n"
  },
  {
    "path": "lib/nmatrix/mkmf.rb",
    "content": "require \"mkmf\"\n\nif RUBY_VERSION < '1.9'\n  raise NotImplementedError, \"Sorry, you need at least Ruby 1.9!\"\nend\n\n# Function derived from NArray's extconf.rb.\ndef create_conf_h(file) #:nodoc:\n  print \"creating #{file}\\n\"\n  File.open(file, 'w') do |hfile|\n    header_guard = file.upcase.sub(/\\s|\\./, '_')\n\n    hfile.puts \"#ifndef #{header_guard}\"\n    hfile.puts \"#define #{header_guard}\"\n    hfile.puts\n\n    # FIXME: Find a better way to do this:\n    hfile.puts \"#define RUBY_2 1\" if RUBY_VERSION >= '2.0'\n\n    for line in $defs\n      line =~ /^-D(.*)/\n      hfile.printf \"#define %s 1\\n\", $1\n    end\n\n    hfile.puts\n    hfile.puts \"#endif\"\n  end\nend\n\ndef find_newer_gplusplus #:nodoc:\n  print \"checking for apparent GNU g++ binary with C++0x/C++11 support... \"\n  [9,8,7,6,5,4,3].each do |minor|\n    ver = \"4.#{minor}\"\n    gpp = \"g++-#{ver}\"\n    result = `which #{gpp}`\n    next if result.empty?\n    CONFIG['CXX'] = gpp\n    puts ver\n    return CONFIG['CXX']\n  end\n  false\nend\n\ndef gplusplus_version\n  cxxvar = proc { |n| `#{CONFIG['CXX']} -E -dM - <#{File::NULL} | grep #{n}`.chomp.split(' ')[2] }\n  major = cxxvar.call('__GNUC__')\n  minor = cxxvar.call('__GNUC_MINOR__')\n  patch = cxxvar.call('__GNUC_PATCHLEVEL__')\n\n  raise(\"unable to determine g++ version (match to get version was nil)\") if major.nil? || minor.nil? 
|| patch.nil?\n\n  \"#{major}.#{minor}.#{patch}\"\nend\n\n\nif /cygwin|mingw/ =~ RUBY_PLATFORM\n  CONFIG[\"DLDFLAGS\"] << \" --output-lib libnmatrix.a\"\nend\n\n# Fix compiler pairing\nif CONFIG['CC'] == 'clang' && CONFIG['CXX'] != 'clang++'\n  puts \"WARNING: CONFIG['CXX'] is not 'clang++' even though CONFIG['CC'] is 'clang'.\",\n       \"WARNING: Force to use clang++ together with clang.\"\n\n  CONFIG['CXX'] = 'clang++'\nend\n\nif CONFIG['CXX'] == 'clang++'\n  $CXX_STANDARD = 'c++11'\nelse\n  version = gplusplus_version\n  if version < '4.3.0' && CONFIG['CXX'] == 'g++'  # see if we can find a newer G++, unless it's been overridden by user\n    if !find_newer_gplusplus\n      raise(\"You need a version of g++ which supports -std=c++0x or -std=c++11. If you're on a Mac and using Homebrew, we recommend using mac-brew-gcc.sh to install a more recent g++.\")\n    end\n    version = gplusplus_version\n  end\n\n  if version < '4.7.0'\n    $CXX_STANDARD = 'c++0x'\n  else\n    $CXX_STANDARD = 'c++11'\n  end\n  puts \"using C++ standard... #{$CXX_STANDARD}\"\n  puts \"g++ reports version... \" + `#{CONFIG['CXX']} --version|head -n 1|cut -f 3 -d \" \"`\nend\n\n# For release, these next two should both be changed to -O3.\n$CFLAGS += \" -O3 \"\n#$CFLAGS += \" -static -O0 -g \"\n$CXXFLAGS += \" -O3 -std=#{$CXX_STANDARD} \" #-fmax-errors=10 -save-temps\n#$CXXFLAGS += \" -static -O0 -g -std=#{$CXX_STANDARD} \"\n\nif CONFIG.has_key?('warnflags')\n  CONFIG['warnflags'].gsub!('-Wshorten-64-to-32', '') # doesn't work except in Mac-patched gcc (4.2)\n  CONFIG['warnflags'].gsub!('-Wdeclaration-after-statement', '')\n  CONFIG['warnflags'].gsub!('-Wimplicit-function-declaration', '')\nend\n  \nhave_func(\"rb_array_const_ptr\", \"ruby.h\")\n"
  },
  {
    "path": "lib/nmatrix/monkeys.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == monkeys.rb\n#\n# Ruby core extensions for NMatrix.\n#++\n\n#######################\n# Classes and Modules #\n#######################\n\nclass Array\n  # Convert a Ruby Array to an NMatrix.\n  #\n  # You must provide a shape for the matrix as the first argument.\n  #\n  # == Arguments:\n  # <tt>shape</tt> :: Array describing matrix dimensions (or Integer for square).\n  #   If not provided, will be intuited through #shape.\n  # <tt>dtype</tt> :: Override data type (e.g., to store a Float as :float32\n  #   instead of :float64) -- optional.\n  # <tt>stype</tt> :: Optional storage type (defaults to :dense)\n  def to_nm(shape = nil, dtype = nil, stype = :dense)\n    elements = self.dup\n\n    guess_dtype = ->(type) {\n      case type\n      when Integer  then :int64\n      when Float    then :float64\n      when Complex  then :complex128\n      end\n    }\n\n    guess_shape = lambda { |shapey; shape|\n      # Get the size of the current dimension\n      shape = [shapey.size]\n      shape << shapey.map {|s|\n        if s.respond_to?(:size) && s.respond_to?(:map)\n          guess_shape.call(s)\n        else\n          nil\n        end\n      }\n      if shape.last.any? 
{|s| (s != shape.last.first) || s.nil?}\n        shape.pop\n      end\n      if (shape.first != shape.last) && shape.last.all? {|s| s == shape.last.first}\n        shape[-1] = shape.last.first\n      end\n      shape.flatten\n    }\n\n    unless shape\n      shape = guess_shape.call(elements)\n      elements.flatten!(shape.size - 1)\n      if elements.flatten != elements\n        dtype = :object\n      else\n        dtype ||= guess_dtype[elements[0]]\n      end\n    end\n\n    dtype ||= guess_dtype[self[0]]\n\n    matrix = NMatrix.new(:dense, shape, elements, dtype)\n\n    if stype != :dense then matrix.cast(stype, dtype) else matrix end\n  end\nend\n\nclass Object #:nodoc:\n  def returning(value)\n    yield(value)\n    value\n  end\nend\n\n\nmodule Math #:nodoc:\n  class << self\n    NMatrix::NMMath::METHODS_ARITY_2.each do |meth|\n      define_method \"nm_#{meth}\" do |arg0, arg1|\n        if arg0.is_a? NMatrix then\n          arg0.send(meth, arg1)\n        elsif arg1.is_a? NMatrix then\n          arg1.send(meth, arg0, true)\n        else\n          self.send(\"old_#{meth}\".to_sym, arg0, arg1)\n        end\n      end\n      alias_method \"old_#{meth}\".to_sym, meth\n      alias_method meth, \"nm_#{meth}\".to_sym\n    end\n  end\nend\n\nclass String\n  def underscore\n    self.gsub(/::/, '/').\n    gsub(/([A-Z]+)([A-Z][a-z])/,'\\1_\\2').\n    gsub(/([a-z\\d])([A-Z])/,'\\1_\\2').\n    tr(\"-\", \"_\").\n    downcase\n  end\nend\n\n# Since `autoload` will most likely be deprecated (due to multi-threading concerns),\n# we'll use `const_missing`. See: https://www.ruby-forum.com/topic/3036681 for more info.\nmodule AutoloadPatch #:nodoc\n  def const_missing(name)\n    file = name.to_s.underscore\n    require \"nmatrix/io/#{file}\"\n    klass = const_get(name)\n    return klass if klass\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/nmatrix.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == nmatrix.rb\n#\n# This file loads the C extension for NMatrix and all the ruby\n# files and contains those core functionalities which can be\n# implemented efficiently (or much more easily) in Ruby (e.g.,\n# inspect, pretty_print, element-wise operations).\n#++\n\n# For some reason nmatrix.so ends up in a different place during gem build.\n\n# Detect java\ndef jruby?\n  /java/ === RUBY_PLATFORM\nend\n\nif jruby?\n  require_relative 'jruby/nmatrix_java'\nelse\n  if File.exist?(\"lib/nmatrix/nmatrix.so\") #|| File.exist?(\"lib/nmatrix/nmatrix.bundle\")\n    # Development\n    require_relative \"nmatrix/nmatrix.so\"\n  else\n    # Gem\n    require_relative \"../nmatrix.so\"\n    require_relative './io/mat_reader'\n    require_relative './io/mat5_reader'\n    require_relative './io/market'\n    require_relative './io/point_cloud'\n\n    require_relative './lapack_core.rb'\n    require_relative './yale_functions.rb'\n  end\nend\n\nrequire_relative './math.rb'\nrequire_relative './monkeys'\n\n# NMatrix is a matrix class that supports both multidimensional arrays\n# (`:dense` stype) and sparse storage (`:list` or `:yale` stypes) and 13 data\n# types, including complex numbers, various integer and\n# floating-point sizes and ruby objects.\nclass NMatrix\n  # Read and write extensions for 
NMatrix.\n  module IO\n    extend AutoloadPatch\n\n    # Reader (and eventually writer) of Matlab .mat files.\n    #\n    # The .mat file format is documented in the following link:\n    # * http://www.mathworks.com/help/pdf_doc/matlab/matfile_format.pdf\n    module Matlab\n      extend AutoloadPatch\n\n      class << self\n        # call-seq:\n        #     load(mat_file_path) -> NMatrix\n        #     load_mat(mat_file_path) -> NMatrix\n        #\n        # Load a .mat file and return a NMatrix corresponding to it.\n        def load_mat(file_path)\n          NMatrix::IO::Matlab::Mat5Reader.new(File.open(file_path, \"rb+\")).to_ruby\n        end\n        alias :load :load_mat\n      end\n    end\n  end\n\n  class << self\n    # call-seq:\n    #     load_matlab_file(path) -> Mat5Reader\n    #\n    # * *Arguments* :\n    #   - +file_path+ -> The path to a version 5 .mat file.\n    # * *Returns* :\n    #   - A Mat5Reader object.\n    def load_matlab_file(file_path)\n      NMatrix::IO::Matlab::Mat5Reader.new(File.open(file_path, 'rb')).to_ruby\n    end\n\n    # call-seq:\n    #     load_pcd_file(path) -> PointCloudReader::MetaReader\n    #\n    # * *Arguments* :\n    #   - +file_path+ -> The path to a PCL PCD file.\n    # * *Returns* :\n    #   - A PointCloudReader::MetaReader object with the matrix stored in its +matrix+ property\n    def load_pcd_file(file_path)\n      NMatrix::IO::PointCloudReader::MetaReader.new(file_path)\n    end\n\n    # Calculate the size of an NMatrix of a given shape.\n    def size(shape)\n      shape = [shape,shape] unless shape.is_a?(Array)\n      (0...shape.size).inject(1) { |x,i| x * shape[i] }\n    end\n\n    # Make N-D coordinate arrays for vectorized evaluations of\n    # N-D scalar/vector fields over N-D grids, given N\n    # coordinate arrays arrs. 
N > 1.\n    #\n    # call-seq:\n    #     meshgrid(arrs) -> Array of NMatrix\n    #     meshgrid(arrs, options) -> Array of NMatrix\n    #\n    # * *Arguments* :\n    #   - +vectors+ -> Array of N coordinate arrays (Array or NMatrix), if any have more than one dimension they will be flatten\n    #   - +options+ -> Hash with options (:sparse Boolean, false by default; :indexing Symbol, may be :ij or :xy, :xy by default)\n    # * *Returns* :\n    #   - Array of N N-D NMatrixes\n    # * *Examples* :\n    #     x, y = NMatrix::meshgrid([[1, [2, 3]], [4, 5]])\n    #     x.to_a #<= [[1, 2, 3], [1, 2, 3]]\n    #     y.to_a #<= [[4, 4, 4], [5, 5, 5]]\n    #\n    # * *Using* *options* :\n    #\n    #     x, y = NMatrix::meshgrid([[[1, 2], 3], [4, 5]], sparse: true)\n    #     x.to_a #<= [[1, 2, 3]]\n    #     y.to_a #<= [[4], [5]]\n    #\n    #     x, y = NMatrix::meshgrid([[1, 2, 3], [[4], 5]], indexing: :ij)\n    #     x.to_a #<= [[1, 1], [2, 2], [3, 3]]\n    #     y.to_a #<= [[4, 5], [4, 5], [4, 5]]\n    def meshgrid(vectors, options = {})\n      raise(ArgumentError, 'Expected at least 2 arrays.') if vectors.size < 2\n      options[:indexing] ||= :xy\n      raise(ArgumentError, 'Indexing must be :xy of :ij') unless [:ij, :xy].include? options[:indexing]\n      mats = vectors.map { |arr| arr.respond_to?(:flatten) ? arr.flatten : arr.to_flat_array }\n      mats[0], mats[1] = mats[1], mats[0] if options[:indexing] == :xy\n      new_dim = mats.size\n      lengths = mats.map(&:size)\n      result = mats.map.with_index do |matrix, axis|\n        if options[:sparse]\n          new_shape = Array.new(new_dim, 1)\n          new_shape[axis] = lengths[axis]\n          new_elements = matrix\n        else\n          before_axis = lengths[0...axis].reduce(:*)\n          after_axis = lengths[(axis+1)..-1].reduce(:*)\n          new_shape = lengths\n          new_elements = after_axis ? 
matrix.map{ |el| [el] * after_axis }.flatten : matrix\n          new_elements *= before_axis if before_axis\n        end\n        NMatrix.new(new_shape, new_elements)\n      end\n      result[0], result[1] = result[1], result[0] if options[:indexing] == :xy\n      result\n    end\n  end\n\n  # TODO: Make this actually pretty.\n  def pretty_print(q) #:nodoc:\n    if self.shape.size > 1 and self.shape[1] > 100\n      self.inspect.pretty_print(q)\n    elsif self.dim > 3 || self.dim == 1\n      self.to_a.pretty_print(q)\n    else\n      # iterate through the whole matrix and find the longest number\n      longest = Array.new(self.shape[1], 0)\n      self.each_column.with_index do |col, j|\n        col.each do |elem|\n          elem_len   = elem.inspect.size\n          longest[j] = elem_len if longest[j] < elem_len\n        end\n      end\n\n      if self.dim == 3\n        q.group(0, \"\\n{ layers:\", \"}\") do\n          self.each_layer.with_index do |layer,k|\n            q.group(0, \"\\n  [\\n\", \"  ]\\n\") do\n              layer.each_row.with_index do |row,i|\n                q.group(0, \"    [\", \"]\\n\") do\n                  q.seplist(self[i,0...self.shape[1],k].to_flat_array, lambda { q.text \", \"}, :each_with_index) { |v,j| q.text v.inspect.rjust(longest[j]) }\n                end\n              end\n            end\n          end\n        end\n      else # dim 2\n        q.group(0, \"\\n[\\n \", \"]\") do\n          self.each_row.with_index do |row, i|\n            q.group(1, \" [\", \"]\\n\") do\n              q.seplist(row.to_a, -> { q.text \", \" }, :each_with_index) do |v,j|\n                q.text v.inspect.rjust(longest[j])\n              end\n            end\n            q.breakable unless i + 1 == self.shape[0]\n          end\n        end\n      end\n    end\n  end\n\n  #\n  # call-seq:\n  #     cast(stype, dtype, default) -> NMatrix\n  #     cast(stype, dtype) -> NMatrix\n  #     cast(stype) -> NMatrix\n  #     cast(options) -> NMatrix\n  #\n  # 
This is a user-friendly helper for calling #cast_full. The easiest way to call this function is using an\n  # options hash, e.g.,\n  #\n  #     n.cast(:stype => :yale, :dtype => :int64, :default => false)\n  #\n  # For list and yale, :default sets the \"default value\" or \"init\" of the matrix. List allows a bit more freedom\n  # since non-zeros are permitted. For yale, unpredictable behavior may result if the value is not false, nil, or\n  # some version of 0. Dense discards :default.\n  #\n  # dtype and stype are inferred from the matrix upon which #cast is called -- so you only really need to provide\n  # one. You can actually call this function with no arguments, in which case it functions like #clone.\n  #\n  # If your dtype is :object and you are converting from :dense to a sparse type, it is recommended that you\n  # provide a :default, as 0 may behave differently from its Float or Complex equivalent. If no option\n  # is given, Integer 0 will be used.\n  def cast(*params)\n    if (params.size > 0 && params[0].is_a?(Hash))\n      opts = {\n          :stype => self.stype,\n          :dtype => self.dtype,\n          :default => self.stype == :dense ? 0 : self.default_value\n      }.merge(params[0])\n\n      self.cast_full(opts[:stype], opts[:dtype], opts[:default])\n    else\n      params << self.stype if params.size == 0\n      params << self.dtype if params.size == 1\n      #HACK: the default value can cause an exception if dtype is not complex\n      #and default_value is. (The ruby C code apparently won't convert these.)\n      #Perhaps this should be fixed in the C code (in rubyval_to_cval).\n      default_value = maybe_get_noncomplex_default_value(params[1])\n      params << (self.stype == :dense ? 
0 : default_value) if params.size == 2\n      self.cast_full(*params)\n    end\n\n  end\n\n\n  #\n  # call-seq:\n  #     rows -> Integer\n  #\n  # This shortcut use #shape to return the number of rows (the first dimension)\n  # of the matrix.\n  #\n  def rows\n    shape[0]\n  end\n\n  #\n  # call-seq:\n  #     cols -> Integer\n  #\n  # This shortcut use #shape to return the number of columns (the second\n  # dimension) of the matrix.\n  #\n  def cols\n    shape[1]\n  end\n\n  # Return the main diagonal or antidiagonal a matrix. Only works with 2D matrices.\n  #\n  # == Arguments\n  #\n  # * +main_diagonal+ - Defaults to true. If passed 'false', then will return the\n  #   antidiagonal of the matrix.\n  #\n  # == References\n  #\n  # * http://en.wikipedia.org/wiki/Main_diagonal\n  def diagonal main_diagonal=true\n    diag_size = [cols, rows].min\n    diag = NMatrix.new [diag_size], dtype: dtype\n\n    if main_diagonal\n      0.upto(diag_size-1) do |i|\n        diag[i] = self[i,i]\n      end\n    else\n      row = 0\n      (diag_size-1).downto(0) do |col|\n        diag[row] = self[row,col]\n        row += 1\n      end\n    end\n\n    diag\n  end\n\n  #\n  # call-seq:\n  #     to_hash -> Hash\n  #\n  # Create a Ruby Hash from an NMatrix.\n  #\n  def to_hash\n    if stype == :yale\n      h = {}\n      each_stored_with_indices do |val,i,j|\n        next if val == 0 # Don't bother storing the diagonal zero values -- only non-zeros.\n        if h.has_key?(i)\n          h[i][j] = val\n        else\n          h[i] = {j => val}\n        end\n      end\n      h\n    else # dense and list should use a C internal function.\n      # FIXME: Write a C internal to_h function.\n      m = stype == :dense ? 
self.cast(:list, self.dtype) : self\n      m.__list_to_hash__\n    end\n  end\n  alias :to_h :to_hash\n\n\n  def inspect #:nodoc:\n    original_inspect = super()\n    original_inspect = original_inspect[0...original_inspect.size-1]\n    original_inspect + \" \" + inspect_helper.join(\" \") + \">\"\n  end\n\n  def __yale_ary__to_s(sym) #:nodoc:\n    ary = self.send(\"__yale_#{sym.to_s}__\".to_sym)\n\n    '[' + ary.collect { |a| a ? a : 'nil'}.join(',') + ']'\n  end\n\n\n  # call-seq:\n  #   integer_dtype?() -> Boolean\n  #\n  # Checks if dtype is an integer type\n  #\n  def integer_dtype?\n    [:byte, :int8, :int16, :int32, :int64].include?(self.dtype)\n  end\n\n  # call-seq:\n  #   float_dtype?() -> Boolean\n  #\n  # Checks if dtype is a floating point type\n  #\n  def float_dtype?\n    [:float32, :float64].include?(dtype)\n  end\n\n  ##\n  # call-seq:\n  #   complex_dtype?() -> Boolean\n  #\n  # Checks if dtype is a complex type\n  #\n  def complex_dtype?\n    [:complex64, :complex128].include?(self.dtype)\n  end\n\n  ##\n  # call-seq:\n  #\n  # object_dtype?() -> Boolean\n  #\n  # Checks if dtype is a ruby object\n  def object_dtype?\n    dtype == :object\n  end\n\n\n  #\n  # call-seq:\n  #     to_f -> Float\n  #\n  # Converts an nmatrix with a single element (but any number of dimensions)\n  #  to a float.\n  #\n  # Raises an IndexError if the matrix does not have just a single element.\n  #\n  def to_f\n    raise IndexError, 'to_f only valid for matrices with a single element' unless shape.all? 
{ |e| e == 1 }\n    self[*Array.new(shape.size, 0)]\n  end\n\n  #\n  # call-seq:\n  #     to_flat_array -> Array\n  #     to_flat_a -> Array\n  #\n  # Converts an NMatrix to a one-dimensional Ruby Array.\n  #\n  def to_flat_array\n    ary = Array.new(self.size)\n    self.each.with_index { |v,i| ary[i] = v }\n    ary\n  end\n  alias :to_flat_a :to_flat_array\n\n  #\n  # call-seq:\n  #     size -> Integer\n  #\n  # Returns the total size of the NMatrix based on its shape.\n  #\n  def size\n    NMatrix.size(self.shape)\n  end\n\n\n  def to_s #:nodoc:\n    self.to_flat_array.to_s\n  end\n\n  #\n  # call-seq:\n  #     nvector? -> true or false\n  #\n  # Shortcut function for determining whether the effective dimension is less than the dimension.\n  # Useful when we take slices of n-dimensional matrices where n > 2.\n  #\n  def nvector?\n    self.effective_dim < self.dim\n  end\n\n  #\n  # call-seq:\n  #     vector? -> true or false\n  #\n  # Shortcut function for determining whether the effective dimension is 1. See also #nvector?\n  #\n  def vector?\n    self.effective_dim == 1\n  end\n\n\n  #\n  # call-seq:\n  #     to_a -> Array\n  #\n  # Converts an NMatrix to an array of arrays, or an NMatrix of effective dimension 1 to an array.\n  #\n  # Does not yet work for dimensions > 2\n  def to_a(dimen=nil)\n    if self.dim == 2\n\n      return self.to_flat_a if self.shape[0] == 1\n\n      ary = []\n      begin\n        self.each_row do |row|\n          ary << row.to_flat_a\n        end\n      #rescue NotImplementedError # Oops. 
Try copying instead\n      #  self.each_row(:copy) do |row|\n      #    ary << row.to_a.flatten\n      #  end\n      end\n      ary\n    else\n      to_a_rec(0)\n    end\n  end\n\n\n  #\n  # call-seq:\n  #     rank(dimension, row_or_column_number) -> NMatrix\n  #     rank(dimension, row_or_column_number, :reference) -> NMatrix reference slice\n  #\n  # Returns the rank (e.g., row, column, or layer) specified, using slicing by copy as default.\n  #\n  # See @row (dimension = 0), @column (dimension = 1)\n  def rank(shape_idx, rank_idx, meth = :copy)\n\n    if shape_idx > (self.dim-1)\n      raise(RangeError, \"#rank call was out of bounds\")\n    end\n\n    params = Array.new(self.dim)\n    params.each.with_index do |v,d|\n      params[d] = d == shape_idx ? rank_idx : 0...self.shape[d]\n    end\n\n    meth == :reference ? self[*params] : self.slice(*params)\n  end\n\n  #\n  # call-seq:\n  #     column(column_number) -> NMatrix\n  #     column(column_number, get_by) -> NMatrix\n  #\n  # Returns the column specified. 
Uses slicing by copy as default.\n  #\n  # * *Arguments* :\n  #   - +column_number+ -> Integer.\n  #   - +get_by+ -> Type of slicing to use, +:copy+ or +:reference+.\n  # * *Returns* :\n  #   - A NMatrix representing the requested column as a column vector.\n  #\n  # Examples:\n  #\n  #   m = NMatrix.new(2, [1, 4, 9, 14], :int32) # =>  1   4\n  #                                                   9  14\n  #\n  #   m.column(1) # =>   4\n  #                     14\n  #\n  def column(column_number, get_by = :copy)\n    rank(1, column_number, get_by)\n  end\n\n  alias :col :column\n\n  #\n  # call-seq:\n  #     row(row_number) -> NMatrix\n  #     row(row_number, get_by) -> NMatrix\n  #\n  # * *Arguments* :\n  #   - +row_number+ -> Integer.\n  #   - +get_by+ -> Type of slicing to use, +:copy+ or +:reference+.\n  # * *Returns* :\n  #   - An NMatrix representing the requested row as a row vector.\n  #\n  def row(row_number, get_by = :copy)\n    rank(0, row_number, get_by)\n  end\n\n  #\n  # call-seq:\n  #     last -> Element of self.dtype\n  #\n  # Returns the last element stored in an NMatrix\n  #\n  def last\n    self[*Array.new(self.dim, -1)]\n  end\n\n\n  #\n  # call-seq:\n  #     reshape(new_shape) -> NMatrix\n  #\n  # Clone a matrix, changing the shape in the process. Note that this function does not do a resize; the product of\n  # the new and old shapes' components must be equal.\n  #\n  # * *Arguments* :\n  #   - +new_shape+ -> Array of positive Integers.\n  # * *Returns* :\n  #   - A copy with a different shape.\n  #\n  def reshape new_shape,*shapes\n    if new_shape.is_a?Integer\n      newer_shape =  [new_shape]+shapes\n    else  # new_shape is an Array\n      newer_shape = new_shape\n    end\n    t = reshape_clone_structure(newer_shape)\n    left_params  = [:*]*newer_shape.size\n    right_params = [:*]*self.shape.size\n    t[*left_params] = self[*right_params]\n    t\n  end\n\n\n  #\n  # call-seq:\n  #     reshape!(new_shape) -> NMatrix\n  #     reshape! 
new_shape  -> NMatrix\n  #\n  # Reshapes the matrix (in-place) to the desired shape. Note that this function does not do a resize; the product of\n  # the new and old shapes' components must be equal.\n  #\n  # * *Arguments* :\n  #   - +new_shape+ -> Array of positive Integer.\n  #\n  def reshape! new_shape,*shapes\n    if self.is_ref?\n      raise(ArgumentError, \"This operation cannot be performed on reference slices\")\n    else\n      if new_shape.is_a?Integer\n        shape =  [new_shape]+shapes\n      else  # new_shape is an Array\n        shape = new_shape\n      end\n      self.reshape_bang(shape)\n    end\n  end\n\n  #\n  # call-seq:\n  #     transpose -> NMatrix\n  #     transpose(permutation) -> NMatrix\n  #\n  # Clone a matrix, transposing it in the process. If the matrix is two-dimensional, the permutation is taken to be [1,0]\n  # automatically (switch dimension 0 with dimension 1). If the matrix is n-dimensional, you must provide a permutation\n  # of +0...n+.\n  #\n  # * *Arguments* :\n  #   - +permutation+ -> Optional Array giving a permutation.\n  # * *Returns* :\n  #   - A copy of the matrix, but transposed.\n  #\n  def transpose(permute = nil)\n    if permute.nil?\n      if self.dim == 1\n        return self.clone\n      elsif self.dim == 2\n        new_shape = [self.shape[1], self.shape[0]]\n      else\n        raise(ArgumentError, \"need permutation array of size #{self.dim}\")\n      end\n    elsif !permute.is_a?(Array) || permute.sort.uniq != (0...self.dim).to_a\n      raise(ArgumentError, \"invalid permutation array\")\n    else\n      # Figure out the new shape based on the permutation given as an argument.\n      new_shape = permute.map { |p| self.shape[p] }\n    end\n\n    if self.dim > 2 # FIXME: For dense, several of these are basically equivalent to reshape.\n\n      # Make the new data structure.\n      t = self.reshape_clone_structure(new_shape)\n\n      self.each_stored_with_indices do |v,*indices|\n        p_indices = permute.map 
{ |p| indices[p] }\n        t[*p_indices] = v\n      end\n      t\n    elsif self.list? # TODO: Need a C list transposition algorithm.\n      # Make the new data structure.\n      t = self.reshape_clone_structure(new_shape)\n\n      self.each_column.with_index do |col,j|\n        t[j,:*] = col.to_flat_array\n      end\n      t\n    else\n      # Call C versions of Yale and List transpose, which do their own copies\n      if jruby?\n        nmatrix = NMatrix.new :copy\n        nmatrix.shape = [@shape[1],@shape[0]]\n        twoDMat = self.twoDMat.transpose\n        nmatrix.s = ArrayRealVector.new(ArrayGenerator.getArrayDouble(twoDMat.getData(), shape[1],shape[0]))\n        return nmatrix\n      else\n        self.clone_transpose\n      end\n    end\n  end\n\n\n  # call-seq:\n  #     matrix1.concat(*m2) -> NMatrix\n  #     matrix1.concat(*m2, rank) -> NMatrix\n  #     matrix1.hconcat(*m2) -> NMatrix\n  #     matrix1.vconcat(*m2) -> NMatrix\n  #     matrix1.dconcat(*m3) -> NMatrix\n  #\n  # Joins two matrices together into a new larger matrix. Attempts to determine\n  # which direction to concatenate on by looking for the first common element\n  # of the matrix +shape+ in reverse. 
In other words, concatenating two columns\n  # together without supplying +rank+ will glue them into an n x 2 matrix.\n  #\n  # You can also use hconcat, vconcat, and dconcat for the first three ranks.\n  # concat performs an hconcat when no rank argument is provided.\n  #\n  # The two matrices must have the same +dim+.\n  #\n  # * *Arguments* :\n  #   - +matrices+ -> one or more matrices\n  #   - +rank+ -> Integer (for rank); alternatively, may use :row, :column, or\n  #   :layer for 0, 1, 2, respectively\n  def concat(*matrices)\n    rank = nil\n    rank = matrices.pop unless matrices.last.is_a?(NMatrix)\n\n    # Find the first matching dimension and concatenate along that (unless rank is specified)\n    if rank.nil?\n      rank = self.dim-1\n      self.shape.reverse_each.with_index do |s,i|\n        matrices.each do |m|\n          if m.shape[i] != s\n            rank -= 1\n            break\n          end\n        end\n      end\n    elsif rank.is_a?(Symbol) # Convert to numeric\n      rank = {:row => 0, :column => 1, :col => 1, :lay => 2, :layer => 2}[rank]\n    end\n\n    # Need to figure out the new shape.\n    new_shape = self.shape.dup\n    new_shape[rank] = matrices.inject(self.shape[rank]) { |total,m| total + m.shape[rank] }\n\n    # Now figure out the options for constructing the concatenated matrix.\n    opts = {stype: self.stype, default: self.default_value, dtype: self.dtype}\n    if self.yale?\n      # We can generally predict the new capacity for Yale. Subtract out the number of rows\n      # for each matrix being concatenated, and then add in the number of rows for the new\n      # shape. That takes care of the diagonal. 
The rest of the capacity is represented by\n      # the non-diagonal non-default values.\n      new_cap = matrices.inject(self.capacity - self.shape[0]) do |total,m|\n        total + m.capacity - m.shape[0]\n      end - self.shape[0] + new_shape[0]\n      opts = {capacity: new_cap}.merge(opts)\n    end\n\n    # Do the actual construction.\n    n = NMatrix.new(new_shape, opts)\n\n    # Figure out where to start concatenation. We don't know where it will end,\n    # because each matrix may have own size along concat dimension.\n    pos = Array.new(self.dim) { 0 }\n\n    matrices.unshift(self)\n    matrices.each do |m|\n      # Figure out where to start and stop the concatenation. We'll use\n      # NMatrices instead of Arrays because then we can do elementwise addition.\n      ranges = m.shape.map.with_index { |s,i| pos[i]...(pos[i] + s) }\n\n      n[*ranges] = m\n\n      # Move over by the requisite amount\n      pos[rank] = pos[rank] + m.shape[rank]\n    end\n\n    n\n  end\n\n  # Horizontal concatenation with +matrices+.\n  def hconcat(*matrices)\n    concat(*matrices, :column)\n  end\n\n  # Vertical concatenation with +matrices+.\n  def vconcat(*matrices)\n    concat(*matrices, :row)\n  end\n\n  # Depth concatenation with +matrices+.\n  def dconcat(*matrices)\n    concat(*matrices, :layer)\n  end\n\n\n  #\n  # call-seq:\n  #     upper_triangle -> NMatrix\n  #     upper_triangle(k) -> NMatrix\n  #     triu -> NMatrix\n  #     triu(k) -> NMatrix\n  #\n  # Returns the upper triangular portion of a matrix. This is analogous to the +triu+ method\n  # in MATLAB.\n  #\n  # * *Arguments* :\n  #   - +k+ -> Positive integer. 
How many extra diagonals to include in the upper triangular portion.\n  #\n  def upper_triangle(k = 0)\n    raise(NotImplementedError, \"only implemented for 2D matrices\") if self.shape.size > 2\n\n    t = self.clone_structure\n    (0...self.shape[0]).each do |i|\n      if i - k < 0\n        t[i, :*] = self[i, :*]\n      else\n        t[i, 0...(i-k)]             = 0\n        t[i, (i-k)...self.shape[1]] = self[i, (i-k)...self.shape[1]]\n      end\n    end\n    t\n  end\n  alias :triu :upper_triangle\n\n\n  #\n  # call-seq:\n  #     upper_triangle! -> NMatrix\n  #     upper_triangle!(k) -> NMatrix\n  #     triu! -> NMatrix\n  #     triu!(k) -> NMatrix\n  #\n  # Deletes the lower triangular portion of the matrix (in-place) so only the upper portion remains.\n  #\n  # * *Arguments* :\n  #   - +k+ -> Integer. How many extra diagonals to include in the deletion.\n  #\n  def upper_triangle!(k = 0)\n    raise(NotImplementedError, \"only implemented for 2D matrices\") if self.shape.size > 2\n\n    (0...self.shape[0]).each do |i|\n      if i - k >= 0\n        self[i, 0...(i-k)] = 0\n      end\n    end\n    self\n  end\n  alias :triu! :upper_triangle!\n\n\n  #\n  # call-seq:\n  #     lower_triangle -> NMatrix\n  #     lower_triangle(k) -> NMatrix\n  #     tril -> NMatrix\n  #     tril(k) -> NMatrix\n  #\n  # Returns the lower triangular portion of a matrix. This is analogous to the +tril+ method\n  # in MATLAB.\n  #\n  # * *Arguments* :\n  #   - +k+ -> Integer. 
How many extra diagonals to include in the lower triangular portion.\n  #\n  def lower_triangle(k = 0)\n    raise(NotImplementedError, \"only implemented for 2D matrices\") if self.shape.size > 2\n\n    t = self.clone_structure\n    (0...self.shape[0]).each do |i|\n      if i + k >= shape[0]\n        t[i, :*] = self[i, :*]\n      else\n        t[i, (i+k+1)...self.shape[1]] = 0\n        t[i, 0..(i+k)] = self[i, 0..(i+k)]\n      end\n    end\n    t\n  end\n  alias :tril :lower_triangle\n\n\n  #\n  # call-seq:\n  #     lower_triangle! -> NMatrix\n  #     lower_triangle!(k) -> NMatrix\n  #     tril! -> NMatrix\n  #     tril!(k) -> NMatrix\n  #\n  # Deletes the upper triangular portion of the matrix (in-place) so only the lower portion remains.\n  #\n  # * *Arguments* :\n  #   - +k+ -> Integer. How many extra diagonals to include in the deletion.\n  #\n  def lower_triangle!(k = 0)\n    raise(NotImplementedError, \"only implemented for 2D matrices\") if self.shape.size > 2\n\n    (0...self.shape[0]).each do |i|\n      if i + k < shape[0]\n        self[i, (i+k+1)...self.shape[1]] = 0\n      end\n    end\n    self\n  end\n  alias :tril! :lower_triangle!\n\n\n  #\n  # call-seq:\n  #     layer(layer_number) -> NMatrix\n  #     row(layer_number, get_by) -> NMatrix\n  #\n  # * *Arguments* :\n  #   - +layer_number+ -> Integer.\n  #   - +get_by+ -> Type of slicing to use, +:copy+ or +:reference+.\n  # * *Returns* :\n  #   - A NMatrix representing the requested layer as a layer vector.\n  #\n  def layer(layer_number, get_by = :copy)\n    layer = rank(2, layer_number, get_by)\n\n    if jruby?\n      nmatrix = NMatrix.new :copy\n      nmatrix.shape = layer.shape\n      nmatrix.s = layer.s\n      return nmatrix\n    else\n      layer\n    end\n\n  end\n\n\n\n  #\n  # call-seq:\n  #     shuffle! 
-> ...\n  #     shuffle!(random: rng) -> ...\n  #\n  # Re-arranges the contents of an NVector.\n  #\n  # TODO: Write more efficient version for Yale, list.\n  # TODO: Generalize for more dimensions.\n  def shuffle!(*args)\n    method_missing(:shuffle!, *args) if self.effective_dim > 1\n    ary = self.to_flat_a\n    ary.shuffle!(*args)\n    ary.each.with_index { |v,idx| self[idx] = v }\n    self\n  end\n\n\n  #\n  # call-seq:\n  #     shuffle -> ...\n  #     shuffle(rng) -> ...\n  #\n  # Re-arranges the contents of an NVector.\n  #\n  # TODO: Write more efficient version for Yale, list.\n  # TODO: Generalize for more dimensions.\n  def shuffle(*args)\n    method_missing(:shuffle!, *args) if self.effective_dim > 1\n    t = self.clone\n    t.shuffle!(*args)\n  end\n\n\n  #\n  # call-seq:\n  #     sorted_indices -> Array\n  #\n  # Returns an array of the indices ordered by value sorted.\n  #\n  def sorted_indices\n    return method_missing(:sorted_indices) unless vector?\n    ary = self.to_flat_array\n    ary.each_index.sort_by { |i| ary[i] }  # from: http://stackoverflow.com/a/17841159/170300\n  end\n\n\n  #\n  # call-seq:\n  #     binned_sorted_indices -> Array\n  #\n  # Returns an array of arrays of indices ordered by value sorted. Functions basically like +sorted_indices+, but\n  # groups indices together for those values that are the same.\n  #\n  def binned_sorted_indices\n    return method_missing(:sorted_indices) unless vector?\n    ary = self.to_flat_array\n    ary2 = []\n    last_bin = ary.each_index.sort_by { |i| [ary[i]] }.inject([]) do |result, element|\n      if result.empty? 
|| ary[result[-1]] == ary[element]\n        result << element\n      else\n        ary2 << result\n        [element]\n      end\n    end\n    ary2 << last_bin unless last_bin.empty?\n    ary2\n  end\n\n\n  def method_missing name, *args, &block #:nodoc:\n    if name.to_s =~ /^__list_elementwise_.*__$/\n      raise NotImplementedError, \"requested undefined list matrix element-wise operation\"\n    elsif name.to_s =~ /^__yale_scalar_.*__$/\n      raise NotImplementedError, \"requested undefined yale scalar element-wise operation\"\n    else\n      super(name, *args, &block)\n    end\n  end\n\n\n  def respond_to?(method, include_all = false) #:nodoc:\n    if [:shuffle, :shuffle!, :each_with_index, :sorted_indices, :binned_sorted_indices, :nrm2, :asum].include?(method.intern) # vector-only methods\n      return vector?\n    elsif [:each_layer, :layer].include?(method.intern) # 3-or-more dimensions only\n      return dim > 2\n    else\n      super\n    end\n  end\n\n\n  #\n  # call-seq:\n  #     inject -> symbol\n  #\n  # This overrides the inject function to use map_stored for yale matrices\n  #\n  def inject(sym)\n    return super(sym) unless self.yale?\n    return self.map_stored.inject(sym)\n  end\n\n  # Returns the index of the first occurence of the specified value. Returns\n  # an array containing the position of the value, nil in case the value is not found.\n  #\n  def index(value)\n    index = nil\n\n    self.each_with_indices do |yields|\n      if yields.first == value\n        yields.shift\n        index = yields\n        break\n      end\n    end\n\n    index\n  end\n\n  #\n  # call-seq:\n  #     clone_structure -> NMatrix\n  #\n  # This function is like clone, but it only copies the structure and the default value.\n  # None of the other values are copied. It takes an optional capacity argument. 
This is\n  # mostly only useful for dense, where you may not want to initialize; for other types,\n  # you should probably use +zeros_like+.\n  #\n  def clone_structure(capacity = nil)\n    opts = {stype: self.stype, default: self.default_value, dtype: self.dtype}\n    opts = {capacity: capacity}.merge(opts) if self.yale?\n    NMatrix.new(self.shape, opts)\n  end\n\n  #\n  # call-seq:\n  #     repeat(count, axis) -> NMatrix\n  #\n  # * *Arguments* :\n  #   - +count+ -> how many times NMatrix should be repeated\n  #   - +axis+ -> index of axis along which NMatrix should be repeated\n  # * *Returns* :\n  #   - NMatrix created by repeating the existing one along an axis\n  # * *Examples* :\n  #     m = NMatrix.new([2, 2], [1, 2, 3, 4])\n  #     m.repeat(2, 0).to_a #<= [[1, 2], [3, 4], [1, 2], [3, 4]]\n  #     m.repeat(2, 1).to_a #<= [[1, 2, 1, 2], [3, 4, 3, 4]]\n  def repeat(count, axis)\n    raise(ArgumentError, 'Matrix should be repeated at least 2 times.') if count < 2\n    new_shape = shape\n    new_shape[axis] *= count\n    new_matrix = NMatrix.new(new_shape, dtype: dtype)\n    slice = new_shape.map { |axis_size| 0...axis_size }\n    start = 0\n    count.times do\n      slice[axis] = start...(start += shape[axis])\n      new_matrix[*slice] = self\n    end\n    new_matrix\n  end\n\n  # This is how you write an individual element-wise operation function:\n  #def __list_elementwise_add__ rhs\n  #  self.__list_map_merged_stored__(rhs){ |l,r| l+r }.cast(self.stype, NMatrix.upcast(self.dtype, rhs.dtype))\n  #end\nprotected\n\n  def inspect_helper #:nodoc:\n    ary = []\n    ary << \"shape:[#{shape.join(',')}]\" << \"dtype:#{dtype}\" << \"stype:#{stype}\"\n\n    if stype == :yale\n      ary << \"capacity:#{capacity}\"\n\n      # These are enabled by the DEBUG_YALE compiler flag in extconf.rb.\n      if respond_to?(:__yale_a__)\n        ary << \"ija:#{__yale_ary__to_s(:ija)}\" << \"ia:#{__yale_ary__to_s(:ia)}\" <<\n          \"ja:#{__yale_ary__to_s(:ja)}\" << 
\"a:#{__yale_ary__to_s(:a)}\" << \"d:#{__yale_ary__to_s(:d)}\" <<\n          \"lu:#{__yale_ary__to_s(:lu)}\" << \"yale_size:#{__yale_size__}\"\n      end\n\n    end\n\n    ary\n  end\n\n\n  # Clone the structure as needed for a reshape\n  def reshape_clone_structure(new_shape) #:nodoc:\n    raise(ArgumentError, \"reshape cannot resize; size of new and old matrices must match\") unless self.size == new_shape.inject(1) { |p,i| p *= i }\n\n    opts = {stype: self.stype, default: self.default_value, dtype: self.dtype}\n    if self.yale?\n      # We can generally predict the change in capacity for Yale.\n      opts = {capacity: self.capacity - self.shape[0] + new_shape[0]}.merge(opts)\n    end\n    NMatrix.new(new_shape, opts)\n  end\n\n\n  # Helper for converting a matrix into an array of arrays recursively\n  def to_a_rec(dimen = 0) #:nodoc:\n    return self.flat_map { |v| v } if dimen == self.dim-1\n\n    ary = []\n    self.each_rank(dimen) do |sect|\n      ary << sect.to_a_rec(dimen+1)\n    end\n    ary\n  end\n\n\n  # NMatrix constructor helper for sparse matrices. Uses multi-slice-setting to initialize a matrix\n  # with a given array of initial values.\n  def __sparse_initial_set__(ary) #:nodoc:\n    self[0...self.shape[0],0...self.shape[1]] = ary\n  end\n\n\n  # This function assumes that the shapes of the two matrices have already\n  # been tested and are the same.\n  #\n  # Called from inside NMatrix: nm_eqeq\n  #\n  # There are probably more efficient ways to do this, but currently it's unclear how.\n  # We could use +each_row+, but for list matrices, it's still going to need to make a\n  # reference to each of those rows, and that is going to require a seek.\n  #\n  # It might be more efficient to convert one sparse matrix type to the other with a\n  # cast and then run the comparison. For now, let's assume that people aren't going\n  # to be doing this very often, and we can optimize as needed.\n  def dense_eql_sparse? 
m #:nodoc:\n    m.each_with_indices do |v,*indices|\n      return false if self[*indices] != v\n    end\n\n    return true\n  end\n  alias :sparse_eql_sparse? :dense_eql_sparse?\n\n\n  #\n  # See the note in #cast about why this is necessary.\n  # If this is a non-dense matrix with a complex dtype and to_dtype is\n  # non-complex, then this will convert the default value to noncomplex.\n  # Returns 0 if dense.  Returns existing default_value if there isn't a\n  # mismatch.\n  #\n  def maybe_get_noncomplex_default_value(to_dtype) #:nodoc:\n    default_value = 0\n    unless self.stype == :dense then\n      if self.dtype.to_s.start_with?('complex') and not to_dtype.to_s.start_with?('complex') then\n        default_value = self.default_value.real\n      else\n        default_value = self.default_value\n      end\n    end\n    default_value\n  end\n\nend\n\nrequire_relative './shortcuts.rb'\nrequire_relative './enumerate.rb'\n\nrequire_relative './version.rb'\nrequire_relative './blas.rb'\n"
  },
  {
    "path": "lib/nmatrix/rspec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == rspec.rb\n#\n# Monkey patches for RSpec improving its ability to work well with\n# NMatrix (particularly #be_within).\n#\n\nrequire 'rspec'\n\n# Amend RSpec to allow #be_within for matrices.\nmodule RSpec::Matchers::BuiltIn\n  class BeWithin\n\n    def of(expected)\n      @expected = expected\n      @unit     = ''\n      if expected.is_a?(NMatrix)\n        @tolerance = if @delta.is_a?(NMatrix)\n                       @delta.abs\n                     elsif @delta.is_a?(Array)\n                       NMatrix.new(:dense, expected.shape, @delta, :object).abs.cast(:dtype => expected.abs_dtype)\n                     else\n                       (NMatrix.ones_like(expected) * @delta).abs\n                     end\n      else\n        @tolerance = @delta\n      end\n\n      self\n    end\n\n    def percent_of(expected)\n      @expected  = expected\n      @unit      = '%'\n      @tolerance = @expected.abs * @delta / 100.0 # <- only change is to reverse abs and @delta\n      self\n    end\n\n    def matches?(actual)\n      @actual = actual\n      raise needs_expected     unless defined? @expected\n      raise needs_subtractable unless @actual.respond_to? 
:-\n      res = (@actual - @expected).abs <= @tolerance\n\n      #if res.is_a?(NMatrix)\n      #  require 'pry'\n      #  binding.pry\n      #end\n\n      res.is_a?(NMatrix) ? !res.any? { |x| !x } : res\n    end\n\n  end\nend\n"
  },
  {
    "path": "lib/nmatrix/shortcuts.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == shortcuts.rb\n#\n# These are shortcuts for NMatrix and NVector creation, contributed by Daniel\n# Carrera (dcarrera@hush.com) and Carlos Agarie (carlos.agarie@gmail.com).\n#\n# TODO Make all the shortcuts available through modules, allowing someone\n# to include them to make \"MATLAB-like\" scripts.\n#\n# There are some questions to be answered before this can be done, tho.\n#++\n\nclass NMatrix\n\n  # Methods for generating magic matrix.\n  module MagicHelpers\n    class << self\n      def odd_magic(nm, shape)\n        row = shape - 1\n        col = shape / 2       \n        nm[row,col] = 1\n        (2..shape * shape).each do |index|\n          if nm[(row + 1) % shape,(col + 1) % shape] == 0\n            row = (row + 1) % shape\n            col = (col + 1) % shape\n          else\n            row = (row - 1 + shape) % shape\n          end\n            nm[row,col] = index\n        end\n      end\n    \n      def doubly_even_magic(nm, shape)\n        mini_square_num = shape / 4\n        count = 1     \n        inv_count = shape * shape\n        shape.times do |row|\n          shape.times do |col|\n            if col >= mini_square_num and col < shape - mini_square_num\n              if row >= mini_square_num and row < shape - mini_square_num\n      \t        nm[row,col] = count\n     
         else \n                nm[row,col] = inv_count\n              end\n            elsif row < mini_square_num or row >= shape - mini_square_num\n              nm[row,col] = count\n            else\n              nm[row,col] = inv_count\n            end\n            count += 1\n            inv_count -= 1  \n          end\n        end\n      end\n    \n      def singly_even_magic(nm, shape)\n        half_shape = shape / 2\n        complementary_pair = (shape - 2) / 4\n        swap_col = NMatrix.new([shape])\n        index = 0 \n        mini_magic = NMatrix.new([half_shape,half_shape], 0, dtype: nm.dtype)\n        odd_magic mini_magic, half_shape\n        half_shape.times do |row|\n          half_shape.times do |col|\n            nm[row,col] = mini_magic[row,col]  \t\n            nm[row + half_shape,col + half_shape] = mini_magic[row,col] + half_shape * half_shape  \n            nm[row,col + half_shape] = mini_magic[row,col] + 2 * half_shape * half_shape      \n            nm[row + half_shape,col] = mini_magic[row,col] + 3 * half_shape * half_shape       \n          end  \n        end\n  \n        (1..complementary_pair).each do |complementary_entry|\n          swap_col[index] = complementary_entry\n          index += 1\n        end\n      \n        (shape - complementary_pair + 2..shape).each do |center|\n          swap_col[index] = center\n          index += 1\n        end \n      \n        (1..half_shape).each do |row|\n          (1..index).each do |col|\n            temp = nm[row - 1,swap_col[col - 1] - 1]\n            nm[row - 1,swap_col[col - 1] - 1] = nm[row + half_shape - 1,swap_col[col - 1] - 1]\n            nm[row + half_shape - 1,swap_col[col - 1] - 1] = temp\n          end\n        end\n\n        temp = nm[complementary_pair,0] \n        nm[complementary_pair,0] = nm[complementary_pair + half_shape,0] \n        nm[complementary_pair + half_shape,0] = temp\n\n        temp = nm[complementary_pair + half_shape,complementary_pair]\n        
nm[complementary_pair + half_shape,complementary_pair] = nm[complementary_pair,complementary_pair] \n        nm[complementary_pair,complementary_pair] = temp\n      end  \n    end \n  end \n    \n  # call-seq:\n  #     m.dense? -> true or false\n  #\n  # Determine if +m+ is a dense matrix.\n  def dense?; return stype == :dense; end\n\n  # call-seq:\n  #     m.yale? -> true or false\n  #\n  # Determine if +m+ is a Yale matrix.\n  def yale?;  return stype == :yale; end\n\n  # call-seq:\n  #     m.list? -> true or false\n  #\n  # Determine if +m+ is a list-of-lists matrix.\n  def list?;  return stype == :list; end\n\n  class << self\n    # call-seq:\n    #     NMatrix[Numeric, ..., Numeric, dtype: Symbol] -> NMatrix\n    #     NMatrix[Array, dtype: Symbol] -> NMatrix\n    #\n    # The default value for +dtype+ is guessed from the first parameter. For example:\n    #   NMatrix[1.0, 2.0].dtype # => :float64\n    #\n    # But this is just a *guess*. If the other values can't be converted to\n    # this dtype, a +TypeError+ will be raised.\n    #\n    # You can use the +N+ constant in this way:\n    #   N = NMatrix\n    #   N[1, 2, 3]\n    #\n    # NMatrix needs to have a succinct way to create a matrix by specifying the\n    # components directly. This is very useful for using it as an advanced\n    # calculator, it is useful for learning how to use, for testing language\n    # features and for developing algorithms.\n    #\n    # The NMatrix::[] method provides a way to create a matrix in a way that is compact and\n    # natural. The components are specified using Ruby array syntax. 
Optionally,\n    # one can specify a dtype as the last parameter (default is :float64).\n    #\n    # Examples:\n    #\n    #   a = N[ 1,2,3,4 ]          =>  1  2  3  4\n    #\n    #   a = N[ 1,2,3,4, :int32 ]  =>  1  2  3  4\n    #\n    #   a = N[ [1,2,3], [3,4,5] ] =>  1.0  2.0  3.0\n    #                                 3.0  4.0  5.0\n    #\n    #   a = N[ 3,6,9 ].transpose => 3\n    #                               6\n    #                               9\n    #\n    # SYNTAX COMPARISON:\n    #\n    #   MATLAB:  a = [ [1 2 3] ; [4 5 6] ]   or  [ 1 2 3 ; 4 5 6 ]\n    #   IDL:   a = [ [1,2,3] , [4,5,6] ]\n    #   NumPy:  a = array( [1,2,3], [4,5,6] )\n    #\n    #   SciRuby:      a = NMatrix[ [1,2,3], [4,5,6] ]\n    #   Ruby array:   a =  [ [1,2,3], [4,5,6] ]\n    def [](*params)\n      options = params.last.is_a?(Hash) ? params.pop : {}\n\n      # First find the dimensions of the array.\n      i = 0\n      shape = []\n      row = params\n      while row.is_a?(Array)\n        shape[i] = row.length\n        row = row[0]\n        i += 1\n      end\n\n      # A row vector should be stored as 1xN, not N\n      #shape.unshift(1) if shape.size == 1\n\n      # Then flatten the array.\n      NMatrix.new(shape, params.flatten, options)\n    end\n\n    #\n    # call-seq:\n    #    zeros(shape) -> NMatrix\n    #    zeros(shape, dtype: dtype) -> NMatrix\n    #    zeros(shape, dtype: dtype, stype: stype) -> NMatrix\n    #\n    # Creates a new matrix of zeros with the dimensions supplied as\n    # parameters.\n    #\n    # * *Arguments* :\n    #   - +shape+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+\n    #   - +stype+ -> (optional) Default is +:dense+.\n    # * *Returns* :\n    #   - NMatrix filled with zeros.\n    #\n    # Examples:\n    #\n    #   NMatrix.zeros(2) # =>  0.0   0.0\n    #                          0.0   0.0\n    #\n    #   NMatrix.zeros([2, 3], dtype: :int32) # =>  0  0  0\n    #      
                                        0  0  0\n    #\n    #   NMatrix.zeros([1, 5], dtype: :int32) # =>  0  0  0  0  0\n    #\n    def zeros(shape, opts = {})\n      NMatrix.new(shape, 0, {:dtype => :float64}.merge(opts))\n    end\n    alias :zeroes :zeros\n\n    #\n    # call-seq:\n    #     ones(shape) -> NMatrix\n    #     ones(shape, dtype: dtype, stype: stype) -> NMatrix\n    #\n    # Creates a matrix filled with ones.\n    #\n    # * *Arguments* :\n    #   - +shape+ -> Array (or integer for square matrix) specifying the shape.\n    #   - +opts+ -> (optional) Hash of options from NMatrix#initialize\n    # * *Returns* :\n    #   - NMatrix filled with ones.\n    #\n    # Examples:\n    #\n    #   NMatrix.ones([1, 3]) # =>  1.0   1.0   1.0\n    #\n    #   NMatrix.ones([2, 3], dtype: :int32) # =>  1  1  1\n    #                                             1  1  1\n    #\n    def ones(shape, opts={})\n      NMatrix.new(shape, 1, {:dtype => :float64, :default => 1}.merge(opts))\n    end\n\n    # call-seq:\n    #   ones_like(nm) -> NMatrix\n    #\n    # Creates a new matrix of ones with the same dtype and shape as the\n    # provided matrix.\n    #\n    # @param [NMatrix] nm the nmatrix whose dtype and shape will be used\n    # @return [NMatrix] a new nmatrix filled with ones.\n    #\n    def ones_like(nm)\n      NMatrix.ones(nm.shape, dtype: nm.dtype, stype: nm.stype, capacity: nm.capacity, default: 1)\n    end\n\n    # call-seq:\n    #   zeros_like(nm) -> NMatrix\n    #\n    # Creates a new matrix of zeros with the same stype, dtype, and shape\n    # as the provided matrix.\n    #\n    # @param [NMatrix] nm the nmatrix whose stype, dtype, and shape will be used\n    # @return [NMatrix] a new nmatrix filled with zeros.\n    #\n    def zeros_like(nm)\n      NMatrix.zeros(nm.shape, dtype: nm.dtype, stype: nm.stype, capacity: nm.capacity, default: 0)\n    end\n\n    #\n    # call-seq:\n    #     eye(shape) -> NMatrix\n    #     eye(shape, dtype: dtype) -> NMatrix\n   
 #     eye(shape, stype: stype, dtype: dtype) -> NMatrix\n    #\n    # Creates an identity matrix (square matrix rank 2).\n    #\n    # * *Arguments* :\n    #   - +size+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+\n    #   - +stype+ -> (optional) Default is +:dense+.\n    # * *Returns* :\n    #   - An identity matrix.\n    #\n    # Examples:\n    #\n    #    NMatrix.eye(3) # =>   1.0   0.0   0.0\n    #                          0.0   1.0   0.0\n    #                          0.0   0.0   1.0\n    #\n    #    NMatrix.eye(3, dtype: :int32) # =>   1   0   0\n    #                                         0   1   0\n    #                                         0   0   1\n    #\n    #    NMatrix.eye(2, dtype: :int32, stype: :yale) # =>   1   0\n    #                                                       0   1\n    #\n    def eye(shape, opts={})\n      # Fill the diagonal with 1's.\n      m = NMatrix.zeros(shape, {:dtype => :float64}.merge(opts))\n      (0...m.shape[0]).each do |i|\n        m[i, i] = 1\n      end\n\n      m\n    end\n    alias :identity :eye\n\n    #\n    # call-seq:\n    #     hilbert(shape) -> NMatrix\n    #     hilbert(shape, dtype: dtype) -> NMatrix\n    #     hilbert(shape, stype: stype, dtype: dtype) -> NMatrix\n    #\n    # Creates an hilbert matrix (square matrix).\n    #\n    # * *Arguments* :\n    #   - +size+ -> integer ( for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+\n    #   - +stype+ -> (optional) Default is +:dense+.\n    # * *Returns* :\n    #   - A hilbert matrix.\n    #\n    # Examples:\n    #\n    #    NMatrix.hilbert(3) # =>  1.0     0.5      0.3333333333333333\n    #            0.5                         0.3333333333333333    0.25\n    #            0.3333333333333333          0.25                  0.2\n    #\n    def hilbert(shape, opts={})\n      m = NMatrix.new([shape,shape], {:dtype => 
:float64}.merge(opts))\n      0.upto(shape - 1) do |i|\n        0.upto(i) do |j|\n          m[i,j] = 1.0 / (j + i + 1)\n          m[j,i] = m[i,j] if i != j\n        end\n      end\n      m\n    end\n\n    #\n    # call-seq:\n    #     inv_hilbert(shape) -> NMatrix\n    #     inv_hilbert(shape, dtype: dtype) -> NMatrix\n    #     inv_hilbert(shape, stype: stype, dtype: dtype) -> NMatrix\n    #\n    # Creates an inverse hilbert matrix (square matrix rank 2).\n    #\n    # * *Arguments* :\n    #   - +size+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+\n    #   - +stype+ -> (optional) Default is +:dense+.\n    # * *Returns* :\n    #   - A hilbert matrix.\n    #\n    # Examples:\n    #    NMatrix.inv_hilbert(3) # =>   9.0,  -36.0,   30.0\n    #                          -36.0,  192.0, -180.0\n    #                          30.0, -180.0,  180.0\n    #\n    #\n    def inv_hilbert(shape, opts={})\n      opts = {:dtype => :float64}.merge(opts)\n      m = NMatrix.new([shape,shape],opts)\n      combination = NMatrix.new([2*shape,2*shape],opts)\n      #combinations refers to the combination of n things taken k at a time\n      0.upto(2*shape-1) do |i|\n        0.upto(i) do |j|\n          if j != 0 and j != i\n            combination[i,j] = combination[i-1,j] + combination[i-1,j-1]\n          else\n            combination[i,j] = 1\n          end\n        end\n      end\n\n      0.upto(shape-1) do |i|\n        0.upto(i) do |j|\n          m[i,j] = combination[shape + j,shape - i - 1] * ((i+j)+1) * \\\n          combination[shape + i,shape - j - 1] * (-1) ** ((i+j)) * \\\n          combination[(i+j),i] * combination[(i+j),i]\n          m[j,i] = m[i,j] if i != j\n        end\n      end\n      m\n    end\n\n    #\n    # call-seq:\n    #     diagonals(array) -> NMatrix\n    #     diagonals(array, dtype: dtype, stype: stype) -> NMatrix\n    #\n    # Creates a matrix filled with specified diagonals.\n    #\n    
# * *Arguments* :\n    #   - +entries+ -> Array containing input values for diagonal matrix\n    #   - +options+ -> (optional) Hash with options for NMatrix#initialize\n    # * *Returns* :\n    #   - NMatrix filled with specified diagonal values.\n    #\n    # Examples:\n    #\n    #   NMatrix.diagonal([1.0,2,3,4]) # => 1.0 0.0 0.0 0.0\n    #                                      0.0 2.0 0.0 0.0\n    #                                      0.0 0.0 3.0 0.0\n    #                                      0.0 0.0 0.0 4.0\n    #\n    #   NMatrix.diagonal([1,2,3,4], dtype: :int32) # => 1 0 0 0\n    #                                                   0 2 0 0\n    #                                                   0 0 3 0\n    #                                                   0 0 0 4\n    #\n    #\n    def diagonal(entries, opts={})\n      m = NMatrix.zeros(entries.size,\n                        {:dtype => guess_dtype(entries[0]), :capacity => entries.size + 1}.merge(opts)\n                       )\n      entries.each_with_index do |n, i|\n        m[i,i] = n\n      end\n      m\n    end\n    alias :diag :diagonal\n    alias :diagonals :diagonal\n\n    # Generate a block-diagonal NMatrix from the supplied 2D square matrices.\n    #\n    # * *Arguments*\n    #   - +*params+ -> An array that collects all arguments passed to the method. The method\n    #                  can receive any number of arguments. Optionally, the last entry of +params+ is \n    #                  a hash of options from NMatrix#initialize. All other entries of +params+ are \n    #                  the blocks of the desired block-diagonal matrix. 
Each such matrix block can be \n    #                  supplied as a square 2D NMatrix object, or alternatively as an array of arrays \n    #                  (with dimensions corresponding to a square matrix), or alternatively as a number.\n    # * *Returns*\n    #   - NMatrix of block-diagonal form filled with specified matrices\n    #     as the blocks along the diagonal.\n    #\n    # * *Example* \n    #\n    #  a = NMatrix.new([2,2], [1,2,3,4])\n    #  b = NMatrix.new([1,1], [123], dtype: :float64)\n    #  c = Array.new(2) { [[10,10], [10,10]] }\n    #  d = Array[[1,2,3], [4,5,6], [7,8,9]]\n    #  m = NMatrix.block_diagonal(a, b, *c, d, 10.0, 11, dtype: :int64, stype: :yale)\n    #        => \n    #        [\n    #          [1, 2,   0,  0,  0,  0,  0, 0, 0, 0,  0,  0]\n    #          [3, 4,   0,  0,  0,  0,  0, 0, 0, 0,  0,  0]\n    #          [0, 0, 123,  0,  0,  0,  0, 0, 0, 0,  0,  0]\n    #          [0, 0,   0, 10, 10,  0,  0, 0, 0, 0,  0,  0]\n    #          [0, 0,   0, 10, 10,  0,  0, 0, 0, 0,  0,  0]\n    #          [0, 0,   0,  0,  0, 10, 10, 0, 0, 0,  0,  0]\n    #          [0, 0,   0,  0,  0, 10, 10, 0, 0, 0,  0,  0]\n    #          [0, 0,   0,  0,  0,  0,  0, 1, 2, 3,  0,  0]\n    #          [0, 0,   0,  0,  0,  0,  0, 4, 5, 6,  0,  0]\n    #          [0, 0,   0,  0,  0,  0,  0, 7, 8, 9,  0,  0]\n    #          [0, 0,   0,  0,  0,  0,  0, 0, 0, 0, 10,  0]\n    #          [0, 0,   0,  0,  0,  0,  0, 0, 0, 0,  0, 11]\n    #        ]\n    #\n    def block_diagonal(*params)\n      options = params.last.is_a?(Hash) ? 
params.pop : {}\n\n      params.each_index do |i|\n        params[i] = params[i].to_nm if params[i].is_a?(Array) # Convert Array to NMatrix\n        params[i] = NMatrix.new([1,1], [params[i]]) if params[i].is_a?(Numeric) # Convert number to NMatrix\n      end\n\n      block_sizes = [] #holds the size of each matrix block\n      params.each do |b|\n        unless b.is_a?(NMatrix)\n          raise(ArgumentError, \"Only NMatrix or appropriate Array objects or single numbers allowed\")\n        end\n        raise(ArgumentError, \"Only 2D matrices or 2D arrays allowed\") unless b.shape.size == 2\n        raise(ArgumentError, \"Only square-shaped blocks allowed\") unless b.shape[0] == b.shape[1]\n        block_sizes << b.shape[0]\n      end\n\n      block_diag_mat = NMatrix.zeros(block_sizes.inject(0,:+), options)\n      (0...params.length).each do |n|\n        # First determine the size and position of the n'th block in the block-diagonal matrix\n        block_size = block_sizes[n]\n        block_pos = block_sizes[0...n].inject(0,:+)\n        # populate the n'th block in the block-diagonal matrix\n        (0...block_size).each do |i|\n          (0...block_size).each do |j|\n            block_diag_mat[block_pos+i,block_pos+j] = params[n][i,j]\n          end\n        end\n      end\n\n      return block_diag_mat\n    end\n    alias :block_diag :block_diagonal\n\n    #\n    # call-seq:\n    #     random(shape) -> NMatrix\n    #\n    # Creates a +:dense+ NMatrix with random numbers between 0 and 1 generated\n    # by +Random::rand+. 
The parameter is the dimension of the matrix.\n    #\n    # If you use an integer dtype, make sure to specify :scale as a parameter, or you'll\n    # only get a matrix of 0s.\n    #\n    # * *Arguments* :\n    #   - +shape+ -> Array (or integer for square matrix) specifying the dimensions.\n    # * *Returns* :\n    #   - NMatrix filled with random values.\n    #\n    # Examples:\n    #\n    #   NMatrix.random([2, 2]) # => 0.4859439730644226   0.1783195585012436\n    #                               0.23193766176700592  0.4503345191478729\n    #\n    #   NMatrix.random([2, 2], :dtype => :byte, :scale => 255) # => [ [252, 108] [44, 12] ]\n    #\n    def random(shape, opts={})\n      scale = opts.delete(:scale) || 1.0\n\n      if opts[:seed].nil?\n        rng = Random.new\n      else\n        rng = Random.new(opts[:seed])\n      end\n      \n\n      random_values = []\n\n\n      # Construct the values of the final matrix based on the dimension.\n      if opts[:dtype] == :complex64 || opts[:dtype] == :complex128\n        NMatrix.size(shape).times { |i| random_values << Complex(rng.rand(scale), rng.rand(scale)) }\n      else\n        NMatrix.size(shape).times { |i| random_values << rng.rand(scale) }\n      end\n\n      NMatrix.new(shape, random_values, {:dtype => :float64, :stype => :dense}.merge(opts))\n    end\n    alias :rand :random\n    \n    # \n    #  call-seq:\n    #    magic(shape) -> NMatrix\n    #    magic(shape, dtype: dtype) -> NMatrix\n    #\n    #  The parameter is the dimension of the matrix.\n    #\n    #  Creates a +:dense+ NMatrix with the following properties:\n    #    - An arrangement of the numbers from 1 to n^2 (n-squared) in the matrix, with each number occurring exactly once.\n    #    - The sum of the entries of any row, any column, or any main diagonal is the same.\n    #    - This sum must be n(n^2+1)/2.\n    #   \n    #  See: http://www.mathworks.com/help/matlab/ref/magic.html\n    # \n    #  * *Arguments* :\n    #   - +shape+ -> Array (or 
integer for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+\n    #  * *Returns* :\n    #   - NMatrix with the above given properties.\n    #\n    #  Examples:\n    #       \n    #    NMatrix.magic(3) # => [  [4.0, 9.0, 2.0]   [3.0, 5.0, 7.0]   [8.0, 1.0, 6.0] ]\n    #    \n    #    NMatrix.magic(4, dtype :int32) # => [  [ 1, 15, 14,  4]\n    #                                           [12,  6,  7,  9]\n    #                                           [ 8, 10, 11,  5]\n    #                                           [13,  3,  2, 16] ]\n    #                             \n    #    NMatrix.magic(6,dtype: :int64) # => [  [31,  9,  2, 22, 27, 20]\n    #                                           [ 3, 32,  7, 21, 23, 25]\n    #                                           [35,  1,  6, 26, 19, 24]\n    #                                           [ 4, 36, 29, 13, 18, 11]\n    #                                           [30,  5, 34, 12, 14, 16]\n    #                                           [ 8, 28, 33, 17, 10, 15] ]\n    #\n    def magic(shape, opts={})\n      raise(ArgumentError, \"shape of two is not allowed\") if shape == 2\n      nm = NMatrix.new([shape,shape], 0, {:dtype => :float64}.merge(opts))\n      if shape % 2 != 0\n        MagicHelpers.odd_magic nm, shape\n      elsif shape % 4 == 0\n        MagicHelpers.doubly_even_magic nm, shape\n      else   \n        MagicHelpers.singly_even_magic nm, shape\n      end\n      nm\n    end\n    \n    #\n    # call-seq:\n    #     linspace(base, limit) -> 1x100 NMatrix\n    #     linspace(base, limit, *shape) -> NMatrix\n    #\n    # Returns an NMatrix with +[shape[0] x shape[1] x .. 
x shape[dim-1]]+ values of dtype +:float64+ equally spaced from\n    # +base+ to +limit+, inclusive.\n    #\n    # See: http://www.mathworks.com/help/matlab/ref/linspace.html\n    #\n    # * *Arguments* :\n    #   - +base+ -> The first value in the sequence.\n    #   - +limit+ -> The last value in the sequence.\n    #   - +shape+ -> Desired output shape. Default returns a 1x100 row vector.\n    # * *Returns* :\n    #   - NMatrix with +:float64+ values.\n    #\n    # Examples :-\n    #\n    #   NMatrix.linspace(1,Math::PI, 6)\n    #     =>[1.0,\n    #        1.4283185005187988,\n    #        1.8566370010375977,\n    #        2.2849555015563965,\n    #        2.7132740020751953,\n    #        3.1415927410125732\n    #       ]\n    #\n    #   NMatrix.linspace(1,10, [3,2])\n    #     =>[\n    #         [              1.0, 2.799999952316284]\n    #         [4.599999904632568, 6.400000095367432]\n    #         [8.199999809265137,              10.0]\n    #       ]\n    #\n    def linspace(base, limit, shape = [100])\n      \n      # Convert shape to array format \n      shape = [shape] if shape.is_a? Integer \n      \n      #Calculate number of elements \n      count = shape.inject(:*)\n            \n      # Linear spacing between elements calculated in step\n      #   step = limit - base / (count - 1)\n      #   [Result Sequence] = [0->N sequence] * step + [Base]\n      step = (limit - base) * (1.0 / (count - 1))\n      result = NMatrix.seq(shape, {:dtype => :float64}) * step\n      result += NMatrix.new(shape, base)\n      result\n    end\n\n    # call-seq:\n    #     logspace(base, limit) -> 1x50 NMatrix with exponent_base = 10 \n    #     logspace(base, limit, shape , exponent_base:) -> NMatrix\n    #     logspace(base, :pi, n) -> 1xn NMatrix with interval [10 ^ base, Math::PI]\n    #\n    # Returns an NMatrix with +[shape[0] x shape[1] x .. 
x shape[dim-1]]+ values of dtype +:float64+ logarithmically spaced from\n    # +exponent_base ^ base+ to +exponent_base ^ limit+, inclusive.\n    #\n    # See: http://www.mathworks.com/help/matlab/ref/logspace.html\n    #\n    # * *Arguments* :\n    #   - +base+ -> exponent_base ** base is the first value in the sequence\n    #   - +limit+ -> exponent_base ** limit is the last value in the sequence.\n    #   - +shape+ -> Desired output shape. Default returns a 1x50 row vector.\n    # * *Returns* :\n    #   - NMatrix with +:float64+ values.\n    #\n    # Examples :-\n    #\n    #   NMatrix.logspace(1,:pi,7)\n    #     =>[\n    #         10.0000, \n    #         8.2450, \n    #         6.7980, \n    #         5.6050, \n    #         4.6213, \n    #         3.8103, \n    #         3.1416\n    #       ]\n    #\n    #   NMatrix.logspace(1,2,[3,2])\n    #     =>[\n    #         [10.0, 15.8489]\n    #         [25.1189, 39.8107]\n    #         [63.0957, 100.0]\n    #       ]\n    #\n    def logspace(base, limit, shape = [50], exponent_base: 10)\n\n      #Calculate limit for [10 ^ base ... Math::PI] if limit = :pi\n      limit = Math.log(Math::PI, exponent_base = 10) if limit == :pi \n      shape = [shape] if shape.is_a? Integer\n\n      #[base...limit]  -> [exponent_base ** base ... exponent_base ** limit]\n      result = NMatrix.linspace(base, limit, shape)\n      result.map {|element| exponent_base ** element}\n    end\n\n    #\n    # call-seq:\n    #     linspace(base, limit) -> 1x100 NMatrix\n    #     linspace(base, limit, *shape) -> NMatrix\n    #\n    # Returns an NMatrix with +[shape[0] x shape[1] x .. x shape[dim-1]]+ values of dtype +:float64+ equally spaced from\n    # +base+ to +limit+, inclusive.\n    #\n    # See: http://www.mathworks.com/help/matlab/ref/linspace.html\n    #\n    # * *Arguments* :\n    #   - +base+ -> The first value in the sequence.\n    #   - +limit+ -> The last value in the sequence.\n    #   - +shape+ -> Desired output shape. 
Default returns a 1x100 row vector.\n    # * *Returns* :\n    #   - NMatrix with +:float64+ values.\n    #\n    # Examples :-\n    #\n    #   NMatrix.linspace(1,Math::PI, 6)\n    #     =>[1.0,\n    #        1.4283185005187988,\n    #        1.8566370010375977,\n    #        2.2849555015563965,\n    #        2.7132740020751953,\n    #        3.1415927410125732\n    #       ]\n    #\n    #   NMatrix.linspace(1,10, [3,2])\n    #     =>[\n    #         [              1.0, 2.799999952316284]\n    #         [4.599999904632568, 6.400000095367432]\n    #         [8.199999809265137,              10.0]\n    #       ]\n    #\n    def linspace(base, limit, shape = [100])\n      \n      # Convert shape to array format \n      shape = [shape] if shape.is_a? Integer \n      \n      #Calculate number of elements \n      count = shape.inject(:*)\n            \n      # Linear spacing between elements calculated in step\n      #   step = limit - base / (count - 1)\n      #   [Result Sequence] = [0->N sequence] * step + [Base]\n      step = (limit - base) * (1.0 / (count - 1))\n      result = NMatrix.seq(shape, {:dtype => :float64}) * step\n      result += NMatrix.new(shape, base)\n      result\n    end\n\n    # call-seq:\n    #     logspace(base, limit) -> 1x50 NMatrix with exponent_base = 10 \n    #     logspace(base, limit, shape , exponent_base:) -> NMatrix\n    #     logspace(base, :pi, n) -> 1xn NMatrix with interval [10 ^ base, Math::PI]\n    #\n    # Returns an NMatrix with +[shape[0] x shape[1] x .. x shape[dim-1]]+ values of dtype +:float64+ logarithmically spaced from\n    # +exponent_base ^ base+ to +exponent_base ^ limit+, inclusive.\n    #\n    # See: http://www.mathworks.com/help/matlab/ref/logspace.html\n    #\n    # * *Arguments* :\n    #   - +base+ -> exponent_base ** base is the first value in the sequence\n    #   - +limit+ -> exponent_base ** limit is the last value in the sequence.\n    #   - +shape+ -> Desired output shape. 
Default returns a 1x50 row vector.\n    # * *Returns* :\n    #   - NMatrix with +:float64+ values.\n    #\n    # Examples :-\n    #\n    #   NMatrix.logspace(1,:pi,7)\n    #     =>[\n    #         10.0000, \n    #         8.2450, \n    #         6.7980, \n    #         5.6050, \n    #         4.6213, \n    #         3.8103, \n    #         3.1416\n    #       ]\n    #\n    #   NMatrix.logspace(1,2,[3,2])\n    #     =>[\n    #         [10.0, 15.8489]\n    #         [25.1189, 39.8107]\n    #         [63.0957, 100.0]\n    #       ]\n    #\n    def logspace(base, limit, shape = [50], exponent_base: 10)\n\n      #Calculate limit for [10 ^ base ... Math::PI] if limit = :pi\n      limit = Math.log(Math::PI, exponent_base = 10) if limit == :pi \n      shape = [shape] if shape.is_a? Integer\n\n      #[base...limit]  -> [exponent_base ** base ... exponent_base ** limit]\n      result = NMatrix.linspace(base, limit, shape)\n      result.map {|element| exponent_base ** element}\n    end\n\n    #\n    # call-seq:\n    #     seq(shape) -> NMatrix\n    #     seq(shape, options) -> NMatrix\n    #     bindgen(shape) -> NMatrix of :byte\n    #     indgen(shape) -> NMatrix of :int64\n    #     findgen(shape) -> NMatrix of :float32\n    #     dindgen(shape) -> NMatrix of :float64\n    #     cindgen(shape) -> NMatrix of :complex64\n    #     zindgen(shape) -> NMatrix of :complex128\n    #     rbindgen(shape) -> NMatrix of :object\n    #\n    # Creates a matrix filled with a sequence of integers starting at zero.\n    #\n    # * *Arguments* :\n    #   - +shape+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +options+ -> (optional) Options permissible for NMatrix#initialize\n    # * *Returns* :\n    #   - NMatrix filled with values 0 through +size+.\n    #\n    # Examples:\n    #\n    #   NMatrix.seq(2) # =>   0   1\n    #                 2   3\n    #\n    #   NMatrix.seq([3, 3], dtype: :float32) # =>  0.0  1.0  2.0\n    #                                    
   3.0  4.0  5.0\n    #                                       6.0  7.0  8.0\n    #\n    def seq(shape, options={})\n\n      # Construct the values of the final matrix based on the dimension.\n      values = (0 ... NMatrix.size(shape)).to_a\n\n      # It'll produce :int32, except if a dtype is provided.\n      NMatrix.new(shape, values, {:stype => :dense}.merge(options))\n    end\n\n    {:bindgen => :byte, :indgen => :int64, :findgen => :float32, :dindgen => :float64,\n     :cindgen => :complex64, :zindgen => :complex128,\n     :rbindgen => :object}.each_pair do |meth, dtype|\n      define_method(meth) { |shape| NMatrix.seq(shape, :dtype => dtype) }\n    end\n  end\nend\n\nmodule NVector #:nodoc:\n\n  class << self\n    #\n    # call-seq:\n    #     new(shape) -> NVector\n    #     new(stype, shape) -> NVector\n    #     new(shape, init) -> NVector\n    #     new(:dense, shape, init) -> NVector\n    #     new(:list, shape, init) -> NVector\n    #     new(shape, init, dtype) -> NVector\n    #     new(stype, shape, init, dtype) -> NVector\n    #     new(stype, shape, dtype) -> NVector\n    #\n    # Creates a new NVector. See also NMatrix#initialize for a more detailed explanation of\n    # the arguments.\n    #\n    # * *Arguments* :\n    #   - +stype+ -> (optional) Storage type of the vector (:list, :dense, :yale). Defaults to :dense.\n    #   - +shape+ -> Shape of the vector. Accepts [n,1], [1,n], or n, where n is a Fixnum.\n    #   - +init+ -> (optional) Yale: capacity; List: default value (0); Dense: initial value or values (uninitialized by default).\n    #   - +dtype+ -> (optional if +init+ provided) Data type stored in the vector. For :dense and :list, can be inferred from +init+.\n    # * *Returns* :\n    #   -\n    #\n    def new(*args)\n      stype = args[0].is_a?(Symbol) ? args.shift : :dense\n      shape = args[0].is_a?(Array) ? 
args.shift  : [1,args.shift]\n\n      if shape.size != 2 || !shape.include?(1) || shape == [1,1]\n        raise(ArgumentError, \"shape must be a Fixnum or an Array of positive Fixnums where exactly one value is 1\")\n      end\n\n      warn \"NVector is deprecated and not guaranteed to work any longer\"\n\n      NMatrix.new(stype, shape, *args)\n    end\n\n    #\n    # call-seq:\n    #     zeros(size) -> NMatrix\n    #     zeros(size, dtype) -> NMatrix\n    #\n    # Creates a new vector of zeros with the dimensions supplied as\n    # parameters.\n    #\n    # * *Arguments* :\n    #   - +size+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+.\n    # * *Returns* :\n    #   - NVector filled with zeros.\n    #\n    # Examples:\n    #\n    #   NVector.zeros(2) # =>  0.0\n    #                          0.0\n    #\n    #   NVector.zeros(3, :int32) # =>  0\n    #                                  0\n    #                                  0\n    #\n    def zeros(size, dtype = :float64)\n      NMatrix.new([size,1], 0, dtype: dtype)\n    end\n    alias :zeroes :zeros\n\n    #\n    # call-seq:\n    #     ones(size) -> NVector\n    #     ones(size, dtype) -> NVector\n    #\n    # Creates a vector of ones with the dimensions supplied as\n    # parameters.\n    #\n    # * *Arguments* :\n    #   - +size+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +dtype+ -> (optional) Default is +:float64+.\n    # * *Returns* :\n    #   - NVector filled with ones.\n    #\n    # Examples:\n    #\n    #   NVector.ones(2) # =>  1.0\n    #                         1.0\n    #\n    #   NVector.ones(3, :int32) # =>  1\n    #                                 1\n    #                                 1\n    #\n    def ones(size, dtype = :float64)\n      NMatrix.new([size,1], 1, dtype: dtype)\n    end\n\n    #\n    # call-seq:\n    #     random(size) -> NVector\n    #\n    # Creates a vector with random 
numbers between 0 and 1 generated by\n    # +Random::rand+ with the dimensions supplied as parameters.\n    #\n    # * *Arguments* :\n    #   - +size+ -> Array (or integer for square matrix) specifying the dimensions.\n    #   - +opts+ -> (optional) NMatrix#initialize options\n    # * *Returns* :\n    #   - NVector filled with random numbers generated by the +Random+ class.\n    #\n    # Examples:\n    #\n    #   NVector.rand(2) # =>  0.4859439730644226\n    #                         0.1783195585012436\n    #\n    def random(size, opts = {})\n      rng = Random.new\n\n      random_values = []\n      size.times { |i| random_values << rng.rand }\n\n      NMatrix.new([size,1], random_values, opts)\n    end\n\n    #\n    # call-seq:\n    #     seq(n) -> NVector\n    #     seq(n, dtype) -> NVector\n    #\n    # Creates a vector with a sequence of +n+ integers starting at zero. You\n    # can choose other types based on the dtype parameter.\n    #\n    # * *Arguments* :\n    #   - +n+ -> Number of integers in the sequence.\n    #   - +dtype+ -> (optional) Default is +:int64+.\n    # * *Returns* :\n    #   - NVector filled with +n+ integers.\n    #\n    # Examples:\n    #\n    #   NVector.seq(2) # =>  0\n    #                        1\n    #\n    #   NVector.seq(3, :float32) # =>  0.0\n    #                                  1.0\n    #                                  2.0\n    #\n    def seq(size, dtype = :int64)\n      values = (0 ... size).to_a\n\n      NMatrix.new([size,1], values, dtype: dtype)\n    end\n\n    #\n    # call-seq:\n    #     indgen(n) -> NVector\n    #\n    # Returns an integer NVector. Equivalent to <tt>seq(n, :int32)</tt>.\n    #\n    # * *Arguments* :\n    #   - +n+ -> Size of the sequence.\n    # * *Returns* :\n    #   - NVector filled with +n+ integers of dtype +:int32+.\n    #\n    def indgen(n)\n      NVector.seq(n, :int32)\n    end\n\n    #\n    # call-seq:\n    #     findgen(n) -> NVector\n    #\n    # Returns a float NVector. 
Equivalent to <tt>seq(n, :float32)</tt>.\n    #\n    # * *Arguments* :\n    #   - +n+ -> Size of the sequence.\n    # * *Returns* :\n    #   - NVector filled with +n+ integers of dtype +:float32+.\n    #\n    def findgen(n)\n      NVector.seq(n, :float32)\n    end\n\n    #\n    # call-seq:\n    #     bindgen(n) -> NVector\n    #\n    # Returns a byte NVector. Equivalent to <tt>seq(n, :byte)</tt>.\n    #\n    # * *Arguments* :\n    #   - +n+ -> Size of the sequence.\n    # * *Returns* :\n    #   - NVector filled with +n+ integers of dtype +:byte+.\n    #\n    def bindgen(n)\n      NVector.seq(n, :byte)\n    end\n\n    #\n    # call-seq:\n    #     cindgen(n) -> NVector\n    #\n    # Returns a complex NVector. Equivalent to <tt>seq(n, :complex64)</tt>.\n    #\n    # * *Arguments* :\n    #   - +n+ -> Size of the sequence.\n    # * *Returns* :\n    #   - NVector filled with +n+ integers of dtype +:complex64+.\n    #\n    def cindgen(n)\n      NVector.seq(n, :complex64)\n    end\n\n    #\n    # call-seq:\n    #     linspace(a, b) -> NVector\n    #     linspace(a, b, n) -> NVector\n    #\n    # Returns a NVector with +n+ values of dtype +:float64+ equally spaced from\n    # +a+ to +b+, inclusive.\n    #\n    # See: http://www.mathworks.com/help/matlab/ref/linspace.html\n    #\n    # * *Arguments* :\n    #   - +a+ -> The first value in the sequence.\n    #   - +b+ -> The last value in the sequence.\n    #   - +n+ -> The number of elements. 
Default is 100.\n    # * *Returns* :\n    #   - NVector with +n+ +:float64+ values.\n    #\n    # Example:\n    #   x = NVector.linspace(0, Math::PI, 1000)\n    #   x.pretty_print\n    #     [0.0\n    #     0.0031447373909807737\n    #     0.006289474781961547\n    #     ...\n    #     3.135303178807831\n    #     3.138447916198812\n    #     3.141592653589793]\n    #   => nil\n    #\n    def linspace(a, b, n = 100)\n      # Formula: seq(n) * step + a\n\n      # step = ((b - a) / (n - 1))\n      step = (b - a) * (1.0 / (n - 1))\n\n      # dtype = :float64 is used to prevent integer coercion.\n      result = NVector.seq(n, :float64) * NMatrix.new([n,1], step, dtype: :float64)\n      result += NMatrix.new([n,1], a, dtype: :float64)\n      result\n    end\n\n    #\n    # call-seq:\n    #     logspace(a, b) -> NVector\n    #     logspace(a, b, n) -> NVector\n    #\n    # Returns a NVector with +n+ values of dtype +:float64+ logarithmically\n    # spaced from +10^a+ to +10^b+, inclusive.\n    #\n    # See: http://www.mathworks.com/help/matlab/ref/logspace.html\n    #\n    # * *Arguments* :\n    #   - +a+ -> The first value in the sequence.\n    #   - +b+ -> The last value in the sequence.\n    #   - +n+ -> The number of elements. 
Default is 100.\n    # * *Returns* :\n    #   - NVector with +n+ +:float64+ values.\n    #\n    # Example:\n    #   x = NVector.logspace(0, Math::PI, 10)\n    #   x.pretty_print\n    #     [1.0\n    #     2.2339109164570266\n    #     4.990357982665873\n    #     11.148015174505757\n    #     24.903672795156997\n    #     55.632586516975095\n    #     124.27824233101062\n    #     277.6265222213364\n    #     620.1929186882427\n    #     1385.4557313670107]\n    #  => nil\n    #\n    def logspace(a, b, n = 100)\n      # Formula: 10^a, 10^(a + step), ..., 10^b, where step = ((b-a) / (n-1)).\n\n      result = NVector.linspace(a, b, n)\n      result.each_stored_with_index { |element, i| result[i] = 10 ** element }\n      result\n    end\n  end\nend\n\n\n# This constant is intended as a simple constructor for NMatrix meant for\n# experimenting.\n#\n# Examples:\n#\n#   a = N[ 1,2,3,4 ]          =>  1  2  3  4\n#\n#   a = N[ 1,2,3,4, :int32 ]  =>  1  2  3  4\n#\n#   a = N[ [1,2,3], [3,4,5] ] =>  1  2  3\n#                                 3  4  5\n#\n#   a = N[ 3,6,9 ].transpose => 3\n#                               6\n#                               9\nN = NMatrix\n"
  },
  {
    "path": "lib/nmatrix/version.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2016, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2016, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#++\n\nclass NMatrix\n  # Note that the format of the VERSION string is needed for NMatrix\n  # native IO. If you change the format, please make sure that native\n  # IO can still understand NMatrix::VERSION.\n  module VERSION #:nodoc:\n    MAJOR = 0\n    MINOR = 2\n    TINY = 4\n    #PRE = \"a\"\n\n    STRING = [MAJOR, MINOR, TINY].compact.join(\".\")\n    #STRING = [MAJOR, MINOR, TINY, PRE].compact.join(\".\")\n  end\nend\n\n"
  },
  {
    "path": "lib/nmatrix/yale_functions.rb",
    "content": "#--\n# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == yale_functions.rb\n#\n# This file contains some shortcut functions for the specialty\n# Yale matrix extensions (mostly for debugging and experimental\n# purposes, but sometimes applicable when you need to speed up\n# your code a lot).\n#++\n\nmodule NMatrix::YaleFunctions\n  # call-seq:\n  #     yale_nd_row_size(i) -> Fixnum\n  #\n  # Returns the size of a given non-diagonal row.\n  def yale_nd_row_size i\n    yale_ija(i+1) - yale_ija(i)\n  end\n\n  # call-seq:\n  #     yale_ja_at(i) -> Array\n  #\n  # Returns the non-diagonal column indices which are stored in a given row.\n  def yale_ja_at i\n    yale_nd_row(i, :keys)\n  end\n  alias :yale_nd_row_as_array :yale_ja_at\n\n  # call-seq:\n  #     yale_ja_set_at(i) -> Set\n  #\n  # Returns the non-diagonal column indices which are stored in a given row, as a Set.\n  def yale_ja_set_at i\n    require 'set'\n    yale_nd_row(i, :keys).to_set\n  end\n  alias :yale_nd_row_as_set :yale_ja_set_at\n\n  # call-seq:\n  #     yale_ja_sorted_set_at -> SortedSet\n  #\n  # Returns the non-diagonal column indices which are stored in a given row, as a Set.\n  def yale_ja_sorted_set_at i\n    require 'set'\n    SortedSet.new(yale_nd_row(i, :keys))\n  end\n  alias :yale_nd_row_as_sorted_set :yale_ja_sorted_set_at\n\n  # call-seq:\n  #     
yale_nd_row_as_hash(i) -> Hash\n  #\n  # Returns the non-diagonal column indices and entries stored in a given row.\n  def yale_nd_row_as_hash i\n    yale_nd_row(i, :hash)\n  end\n\n  # call-seq:\n  #     yale_ja_d_keys_at(i) -> Array\n  #\n  # Returns the diagonal and non-diagonal column indices stored in a given row.\n  def yale_ja_d_keys_at i\n    ary = yale_nd_row(i, :keys)\n    return ary if i >= self.shape[1] || self[i,i] == self.default_value\n    ary << i\n  end\n  alias :yale_row_as_array :yale_ja_d_keys_at\n\n  # call-seq:\n  #     yale_ja_d_keys_set_at(i) -> Set\n  #\n  # Returns the diagonal and non-diagonal column indices stored in a given row.\n  def yale_ja_d_keys_set_at i\n    require 'set'\n    yale_ja_d_keys_at(i).to_set\n  end\n  alias :yale_row_as_set :yale_ja_d_keys_set_at\n\n  # call-seq:\n  #     yale_ja_d_keys_sorted_set_at(i) -> SortedSet\n  #\n  # Returns the diagonal and non-diagonal column indices stored in a given row.\n  def yale_ja_d_keys_sorted_set_at i\n    require 'set'\n    SortedSet.new(yale_row_as_array(i))\n  end\n  alias :yale_row_as_sorted_set :yale_ja_d_keys_sorted_set_at\n\n  # call-seq:\n  #     yale_row_as_hash(i) -> Hash\n  #\n  # Returns the diagonal and non-diagonal column indices and entries stored in a given row.\n  def yale_row_as_hash i\n    h = yale_nd_row(i, :hash)\n    return h if i >= self.shape[1] || self[i,i] == self.default_value\n    h[i] = self[i,i]\n  end\nend"
  },
  {
    "path": "lib/nmatrix.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == nmatrix.rb\n#\n# This file is a stub that only loads the main NMatrix file.\n#\n\nrequire 'nmatrix/nmatrix.rb'\n"
  },
  {
    "path": "nmatrix-atlas.gemspec",
    "content": "lib = File.expand_path('../lib/', __FILE__)\n$:.unshift lib unless $:.include?(lib)\n\nrequire 'nmatrix/version'\n\nGem::Specification.new do |gem|\n  gem.name = \"nmatrix-atlas\"\n  gem.version = NMatrix::VERSION::STRING\n  gem.summary = \"ATLAS backend for nmatrix\"\n  gem.description = \"For using linear algebra fuctions provided by ATLAS\"\n  gem.homepage = 'http://sciruby.com'\n  gem.authors = ['Will Levine', 'John Woods']\n  gem.email =  ['john.o.woods@gmail.com']\n  gem.license = 'BSD-3-Clause'\n\n  gem.files         = [\"lib/nmatrix/atlas.rb\",\"lib/nmatrix/lapack_ext_common.rb\"]\n  gem.files         += `git ls-files -- ext/nmatrix_atlas`.split(\"\\n\")\n  gem.files         += `git ls-files -- ext/nmatrix | grep \".h$\"`.split(\"\\n\") #need nmatrix header files to compile\n  gem.test_files    = `git ls-files -- spec`.split(\"\\n\")\n  gem.test_files    -= `git ls-files -- spec/plugins`.split(\"\\n\")\n  gem.test_files    += `git ls-files -- spec/plugins/atlas`.split(\"\\n\")\n  gem.extensions = ['ext/nmatrix_atlas/extconf.rb']\n  gem.require_paths = [\"lib\"]\n\n  gem.required_ruby_version = '>= 1.9'\n\n  gem.add_dependency 'nmatrix', NMatrix::VERSION::STRING\nend\n\n"
  },
  {
    "path": "nmatrix-fftw.gemspec",
    "content": "lib = File.expand_path('../lib/', __FILE__)\n$:.unshift lib unless $:.include?(lib)\n\nrequire 'nmatrix/version'\n\nGem::Specification.new do |gem|\n  gem.name = \"nmatrix-fftw\"\n  gem.version = NMatrix::VERSION::STRING\n  gem.summary = \"FFTW backend for NMatrix\"\n  gem.description = \"NMatrix extension for using fuctions provided by FFTW\"\n  gem.homepage = 'http://sciruby.com'\n  gem.authors = ['Sameer Deshmukh', 'Magdalen Berns']\n  gem.email =  ['sameer.deshmukh93@gmail.com', 'm.berns@thismagpie.com']\n  gem.license = 'BSD-3-Clause'\n\n  gem.files         = [\"lib/nmatrix/fftw.rb\"]\n  gem.files         += `git ls-files -- ext/nmatrix_fftw`.split(\"\\n\")\n  gem.files         += `git ls-files -- ext/nmatrix | grep \".h$\"`.split(\"\\n\") #need nmatrix header files to compile\n  gem.test_files    = `git ls-files -- spec`.split(\"\\n\")\n  gem.test_files    -= `git ls-files -- spec/plugins`.split(\"\\n\")\n  gem.test_files    += `git ls-files -- spec/plugins/fftw`.split(\"\\n\")\n  gem.extensions = ['ext/nmatrix_fftw/extconf.rb']\n  gem.require_paths = [\"lib\"]\n\n  gem.required_ruby_version = '>= 1.9'\n\n  gem.add_dependency 'nmatrix', NMatrix::VERSION::STRING\nend\n\n"
  },
  {
    "path": "nmatrix-lapacke.gemspec",
    "content": "lib = File.expand_path('../lib/', __FILE__)\n$:.unshift lib unless $:.include?(lib)\n\nrequire 'nmatrix/version'\n\nGem::Specification.new do |gem|\n  gem.name = \"nmatrix-lapacke\"\n  gem.version = NMatrix::VERSION::STRING\n  gem.summary = \"general LAPACK backend for nmatrix using LAPACKE interface\"\n  gem.description = \"For using linear algebra fuctions provided by LAPACK and BLAS\"\n  gem.homepage = 'http://sciruby.com'\n  gem.authors = ['Will Levine', 'John Woods']\n  gem.email =  ['john.o.woods@gmail.com']\n  gem.license = 'BSD-3-Clause'\n\n  gem.files         = [\"lib/nmatrix/lapacke.rb\",\"lib/nmatrix/lapack_ext_common.rb\"]\n  gem.files         += `git ls-files -- ext/nmatrix_lapacke`.split(\"\\n\")\n  gem.files         += `git ls-files -- ext/nmatrix | grep \".h$\"`.split(\"\\n\") #need nmatrix header files to compile\n  gem.test_files    = `git ls-files -- spec`.split(\"\\n\")\n  gem.test_files    -= `git ls-files -- spec/plugins`.split(\"\\n\")\n  gem.test_files    += `git ls-files -- spec/plugins/lapacke`.split(\"\\n\")\n  gem.extensions = ['ext/nmatrix_lapacke/extconf.rb']\n  gem.require_paths = [\"lib\"]\n\n  gem.required_ruby_version = '>= 1.9'\n\n  gem.add_dependency 'nmatrix', NMatrix::VERSION::STRING\nend\n\n"
  },
  {
    "path": "nmatrix.gemspec",
    "content": "lib = File.expand_path('../lib/', __FILE__)\n$:.unshift lib unless $:.include?(lib)\n\nrequire 'nmatrix/version'\n\n#get files that are used by plugins rather than the main nmatrix gem\nplugin_files = []\nDir[\"nmatrix-*.gemspec\"].each do |gemspec_file|\n  gemspec = eval(File.read(gemspec_file))\n  plugin_files += gemspec.files\nend\nplugin_lib_files = plugin_files.select { |file| file.match(/^lib\\//) }\n\nGem::Specification.new do |gem|\n  gem.name = \"nmatrix\"\n  gem.version = NMatrix::VERSION::STRING\n  gem.summary = \"NMatrix is a linear algebra library for Ruby\"\n  gem.description = \"NMatrix is a linear algebra library for Ruby, written mostly in C and C++.\"\n  gem.homepage = 'http://sciruby.com'\n  gem.authors = ['John Woods', 'Chris Wailes', 'Aleksey Timin']\n  gem.email =  ['john.o.woods@gmail.com']\n  gem.license = 'BSD-3-Clause'\n  gem.post_install_message = <<-EOF\n***********************************************************\nWelcome to SciRuby: Tools for Scientific Computing in Ruby!\n\nNMatrix requires a C/C++ compiler. Clang and GCC are \nrecommended. JRuby support is experimental, and requires\nJava.\n\nIf you are upgrading from NMatrix 0.1.0 and rely on\nATLAS features, please check the README.\n\nFaster matrix calculations and more advanced linear\nalgebra features are available by installing either\nthe nmatrix-atlas or nmatrix-lapacke plugins.\n\nMore explicit instructions for NMatrix and SciRuby should\nbe available on the SciRuby website, sciruby.com, or\nthrough our mailing list (which can be found on our web-\nsite).\n\nThanks for trying out NMatrix! 
Happy coding!\n\n***********************************************************\nEOF\n\n  gem.files         = `git ls-files -- ext/nmatrix`.split(\"\\n\")\n  gem.files         += `git ls-files -- lib`.split(\"\\n\")\n  gem.files         -= plugin_lib_files\n  gem.test_files    = `git ls-files -- spec`.split(\"\\n\")\n  gem.test_files    -= `git ls-files -- spec/plugins`.split(\"\\n\")\n  gem.extensions = ['ext/nmatrix/extconf.rb']\n  gem.require_paths = [\"lib\"]\n\n  gem.required_ruby_version = '>= 1.9'\n\n  gem.add_dependency 'packable', '~> 1.3', '>= 1.3.5'\n  gem.add_development_dependency 'bundler', '~>1.6'\n  gem.add_development_dependency 'pry', '~>0.10'\n  gem.add_development_dependency 'rake', '~>10.3'\n  gem.add_development_dependency 'rake-compiler', '~>0.8'\n  gem.add_development_dependency 'rdoc', '~>4.0', '>=4.0.1'\n  gem.add_development_dependency 'rspec', '~>2.14'\n  gem.add_development_dependency 'rspec-longrun', '~>1.0'\nend\n\n"
  },
  {
    "path": "scripts/benchmarks/nmatrix_creation.rb",
    "content": ""
  },
  {
    "path": "scripts/switch_lapack_ubuntu.rb",
    "content": "#!/usr/bin/env ruby\n\nif ARGV[0] == \"atlas\"\n  lapack_prefix = \"/usr/lib/atlas-base/atlas\"\n  blas_prefix = \"/usr/lib/atlas-base/atlas\"\nelsif ARGV[0] == \"openblas\"\n  lapack_prefix = \"/usr/lib/openblas-base\"\n  blas_prefix = \"/usr/lib/openblas-base\"\nelsif ARGV[0] == \"ref\"\n  lapack_prefix = \"/usr/lib/lapack\"\n  blas_prefix = \"/usr/lib/libblas\"\nelse\n  puts \"options are atlas, openblas, or ref\"\n  exit\nend\n\ndef run(cmd)\n  puts \"> #{cmd}\"\n  system cmd\nend\n\n\nrun \"update-alternatives --set liblapack.so.3 #{lapack_prefix}/liblapack.so.3\"\nrun \"update-alternatives --set liblapack.so #{lapack_prefix}/liblapack.so\"\nrun \"update-alternatives --set libblas.so.3 #{blas_prefix}/libblas.so.3\"\nrun \"update-alternatives --set libblas.so #{blas_prefix}/libblas.so\"\n"
  },
  {
    "path": "scripts/ttable_helper.rb",
    "content": "#!/usr/bin/ruby\n\n# A helper file for generating and maintaining template tables.\n\nDTYPES = [\n          :uint8_t,\n          :int8_t,\n          :int16_t,\n          :int32_t,\n          :int64_t,\n          :float32_t,\n          :float64_t,\n          :'nm::Complex64',\n          :'nm::Complex128',\n          :'nm::RubyObject'\n         ]\n\ndef nullify(disabled = []) #:nodoc:\n  DTYPES.map { |t| if disabled.include?(t) then :NULL else t end }\nend\n\nITYPES = [\n          :uint8_t,\n          :uint16_t,\n          :uint32_t,\n          :uint64_t\n         ]\n\nEWOPS = [\n         :'nm::EW_ADD',\n         :'nm::EW_SUB',\n         :'nm::EW_MUL',\n         :'nm::EW_DIV',\n         :'nm::EW_POW',\n         :'nm::EW_MOD',\n         :'nm::EW_EQEQ',\n         :'nm::EW_NEQ',\n         :'nm::EW_LT',\n         :'nm::EW_GT',\n         :'nm::EW_LEQ',\n         :'nm::EW_GEQ'\n        ]\n\nLR_ALLOWED = {\n  :uint8_t       => DTYPES,\n  :int8_t        => DTYPES,\n  :int16_t      => DTYPES,\n  :int32_t      => DTYPES,\n  :int64_t      => DTYPES,\n  :float32_t    => DTYPES,\n  :float64_t    => DTYPES,\n  :'nm::Complex64'    => DTYPES,\n  :'nm::Complex128'    => DTYPES,\n  :'nm::RubyObject'    => DTYPES\n}\n\nlines =\n  case ARGV[0]\n  when 'OPLR'\n    '{' +\n      EWOPS.map do |op|\n\n    '{' +\n      DTYPES.map do |l_dtype|\n\n      '{' +\n        LR_ALLOWED[l_dtype].map do |r_dtype|\n        if r_dtype == :NULL\n          'NULL'\n        else\n          \"fun<#{op}, #{l_dtype}, #{r_dtype}>\"\n        end\n      end.join(', ') +\n        '}'\n\n    end.join(\",\\n\") +\n      '}'\n\n  end.join(\",\\n\") +\n      '}'\n\n  when 'OPID'\n    '{' +\n      EWOPS.map do |op|\n    '{' +\n      ITYPES.map do |itype|\n      '{' +\n        DTYPES.map do |dtype|\n\n        if dtype == :NULL\n          'NULL'\n        else\n          \"fun<#{op}, #{itype}, #{dtype}>\"\n        end\n\n      end.join(\",\") +\n        '}'\n    end.join(\",\\\\\\n\") +\n      '}'\n  
end.join(\",\\\\\\n\") +\n      '}'\n\n  when 'LR'\n    '{' + DTYPES.map do |l_dtype|\n      '{' + LR_ALLOWED[l_dtype].map do |r_dtype|\n        if r_dtype == :NULL\n          'NULL'\n        else\n          \"fun<#{l_dtype}, #{r_dtype}>\"\n        end\n      end.join(', ') + '}'\n    end.join(\",\\n\") + '}'\n  end\n\nputs lines\n"
  },
  {
    "path": "spec/00_nmatrix_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == 00_nmatrix_spec.rb\n#\n# Basic tests for NMatrix. These should load first, as they're\n# essential to NMatrix operation.\n#\nrequire 'spec_helper'\n\ndescribe NMatrix do\n  it \"creates a matrix with the new constructor\" do\n    n = NMatrix.new([2,2], [0,1,2,3], dtype: :int64)\n    expect(n.shape).to eq([2,2])\n    expect(n.entries).to eq([0,1,2,3])\n    expect(n.dtype).to eq(:int64)\n  end\n\n  it \"adequately requires information to access a single entry of a dense matrix\" do\n    n = NMatrix.new(:dense, 4, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], :float64)\n    expect(n[0,0]).to eq(0)\n    expect { n[0] }.to raise_error(ArgumentError)\n  end\n\n  it \"calculates exact determinants on small dense matrices\" do\n    expect(NMatrix.new(2, [1,2,3,4], stype: :dense, dtype: :int64).det_exact).to eq(-2)\n    expect(NMatrix.new(3, [1,2,3,0,5,6,7,8,0], stype: :dense, dtype: :int64)\n           .det_exact).to eq(-69)\n  end\n\n  it \"calculates exact determinants on small yale square matrices\" do\n    expect(NMatrix.new(2, [1,2,3,4], stype: :yale, dtype: :int64).det_exact).to eq(-2)\n    expect(NMatrix.new(3, [1,2,3,0,5,6,7,8,0], stype: :yale, dtype: :int64)\n           .det_exact).to eq(-69)\n  end\n\n  it \"calculates exact determinants on small list square matrices\" do\n    
expect(NMatrix.new(2, [1,2,3,4], stype: :list, dtype: :int64).det_exact).to eq(-2)\n    expect(NMatrix.new(3, [1,2,3,0,5,6,7,8,0], stype: :list, dtype: :int64)\n           .det_exact).to eq(-69)\n  end\n\n  it \"calculates inverse exact determinants on small dense matrices\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    a = NMatrix.new(3, [1,2,3,0,1,4,5,6,0], stype: :dense, dtype: :int64)\n    inversed = a.method(:__inverse_exact__).call(a.clone, 3, 3)\n    b = NMatrix.new(3, [-24,18,5,20,-15,-4,-5,4,1], stype: :dense, dtype: :int64)\n    expect(inversed).to eq(b)\n\n    c = NMatrix.new(3, [1,0,3,0,0,1,0,6,0], stype: :dense, dtype: :int64)\n    inversed = c.method(:__inverse_exact__).call(c.clone, 3, 3)\n    d = NMatrix.new(3, [1,-3,0,0,0,0,0,1,0], stype: :dense, dtype: :int64)\n    expect(inversed).to eq(d)\n\n    e = NMatrix.new(2, [3,1,2,1], stype: :dense, dtype: :int64)\n    inversed = e.method(:__inverse_exact__).call(e.clone, 2, 2)\n    f = NMatrix.new(2, [1,-1,-2,3], stype: :dense, dtype: :int64)\n    expect(inversed).to eq(f)\n  end\n\n  it \"calculates inverse exact determinants on small yale matrices\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    a = NMatrix.new(3, [1,2,3,0,1,4,5,6,0], stype: :yale, dtype: :int64)\n    inversed = a.method(:__inverse_exact__).call(a.clone, 3, 3)\n    b = NMatrix.new(3, [-24,18,5,20,-15,-4,-5,4,1], stype: :yale, dtype: :int64)\n    expect(inversed).to eq(b)\n\n    c = NMatrix.new(3, [1,0,3,0,0,1,0,6,0], stype: :yale, dtype: :int64)\n    inversed = c.method(:__inverse_exact__).call(c.clone, 3, 3)\n    d = NMatrix.new(3, [1,-3,0,0,0,0,0,1,0], stype: :yale, dtype: :int64)\n    expect(inversed).to eq(d)\n\n    e = NMatrix.new(2, [3,1,2,1], stype: :yale, dtype: :int64)\n    inversed = e.method(:__inverse_exact__).call(e.clone, 2, 2)\n    f = NMatrix.new(2, [1,-1,-2,3], stype: :yale, dtype: :int64)\n    expect(inversed).to eq(f)\n  end\n\n  it \"calculates inverse exact 
determinants on small list matrices\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    a = NMatrix.new(3, [1,2,3,0,1,4,5,6,0], stype: :list, dtype: :int64)\n    inversed = a.method(:__inverse_exact__).call(a.clone, 3, 3)\n    b = NMatrix.new(3, [-24,18,5,20,-15,-4,-5,4,1], stype: :list, dtype: :int64)\n    expect(inversed).to eq(b)\n\n    c = NMatrix.new(2, [3,1,2,1], stype: :list, dtype: :int64)\n    inversed = c.method(:__inverse_exact__).call(c.clone, 2, 2)\n    d = NMatrix.new(2, [1,-1,-2,3], stype: :list, dtype: :int64)\n    expect(inversed).to eq(d)\n  end\n\n  it \"calculates determinants\" do\n    expect(NMatrix.new(3, [-2,2,3,-1,1,3,2,0,-1], stype: :dense, dtype: :int64).det).to eq(6)\n  end\n\n  it \"allows casting to Ruby objects\" do\n    m = NMatrix.new([3,3], [0,0,1,0,2,0,3,4,5], dtype: :int64, stype: :dense)\n    n = m.cast(:dense, :object)\n    expect(n).to eq(m)\n  end\n\n  it \"allows casting from Ruby objects\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    m = NMatrix.new(:dense, [3,3], [0,0,1,0,2,0,3,4,5], :object)\n    n = m.cast(:dense, :int64)\n    expect(m).to eq(n)\n  end\n\n  it \"allows stype casting of a dim 2 matrix between dense, sparse, and list (different dtypes)\" do\n    m = NMatrix.new(:dense, [3,3], [0,0,1,0,2,0,3,4,5], :int64).\n      cast(:yale, :int32).\n      cast(:dense, :float64).\n      cast(:list, :object).\n      cast(:dense, :int16).\n      cast(:list, :int32).\n      cast(:yale, :int64) #.\n    #cast(:list, :int32).\n    #cast(:dense, :int16)\n    #m.should.equal?(original)\n    # For some reason this causes some weird garbage collector problems when we uncomment these. 
The above lines won't\n    # work at all in IRB, but work fine when run in a regular Ruby session.\n  end\n\n  it \"fills dense Ruby object matrix with nil\" do\n    n = NMatrix.new([4,3], dtype: :object)\n    pending(\"not yet implemented for object dtype for NMatrix-JRuby\") if jruby?\n    expect(n[0,0]).to eq(nil)\n  end\n\n  it \"fills dense with individual assignments\" do\n    n = NMatrix.new([4,3], dtype: :float64)\n    n[0,0] = 14.0\n    n[0,1] = 9.0\n    n[0,2] = 3.0\n    n[1,0] = 2.0\n    n[1,1] = 11.0\n    n[1,2] = 15.0\n    n[2,0] = 0.0\n    n[2,1] = 12.0\n    n[2,2] = 17.0\n    n[3,0] = 5.0\n    n[3,1] = 2.0\n    n[3,2] = 3.0\n\n    expect(n[0,0]).to eq(14.0)\n    expect(n[0,1]).to eq(9.0)\n    expect(n[0,2]).to eq(3.0)\n    expect(n[1,0]).to eq(2.0)\n    expect(n[1,1]).to eq(11.0)\n    expect(n[1,2]).to eq(15.0)\n    expect(n[2,0]).to eq(0.0)\n    expect(n[2,1]).to eq(12.0)\n    expect(n[2,2]).to eq(17.0)\n    expect(n[3,0]).to eq(5.0)\n    expect(n[3,1]).to eq(2.0)\n    expect(n[3,2]).to eq(3.0)\n  end\n\n  it \"fills dense with a single mass assignment\" do\n    n = NMatrix.new([4,3], [14.0, 9.0, 3.0, 2.0, 11.0, 15.0, 0.0, 12.0, 17.0, 5.0, 2.0, 3.0])\n\n    expect(n[0,0]).to eq(14.0)\n    expect(n[0,1]).to eq(9.0)\n    expect(n[0,2]).to eq(3.0)\n    expect(n[1,0]).to eq(2.0)\n    expect(n[1,1]).to eq(11.0)\n    expect(n[1,2]).to eq(15.0)\n    expect(n[2,0]).to eq(0.0)\n    expect(n[2,1]).to eq(12.0)\n    expect(n[2,2]).to eq(17.0)\n    expect(n[3,0]).to eq(5.0)\n    expect(n[3,1]).to eq(2.0)\n    expect(n[3,2]).to eq(3.0)\n  end\n\n  it \"fills dense with a single mass assignment, with dtype specified\" do\n    m = NMatrix.new([4,3], [14.0, 9.0, 3.0, 2.0, 11.0, 15.0, 0.0, 12.0, 17.0, 5.0, 2.0, 3.0], dtype: :float32)\n\n    expect(m[0,0]).to eq(14.0)\n    expect(m[0,1]).to eq(9.0)\n    expect(m[0,2]).to eq(3.0)\n    expect(m[1,0]).to eq(2.0)\n    expect(m[1,1]).to eq(11.0)\n    expect(m[1,2]).to eq(15.0)\n    expect(m[2,0]).to eq(0.0)\n    
expect(m[2,1]).to eq(12.0)\n    expect(m[2,2]).to eq(17.0)\n    expect(m[3,0]).to eq(5.0)\n    expect(m[3,1]).to eq(2.0)\n    expect(m[3,2]).to eq(3.0)\n  end\n\n  it \"dense handles missing initialization value\" do\n    n = NMatrix.new(3, dtype: :int8)\n    pending(\"not yet implemented for int dtype for NMatrix-JRuby\") if jruby?\n    expect(n.stype).to eq(:dense)\n    expect(n.dtype).to eq(:int8)\n\n    m = NMatrix.new(4, dtype: :float64)\n    expect(m.stype).to eq(:dense)\n    expect(m.dtype).to eq(:float64)\n  end\n\n  [:dense, :list, :yale].each do |storage_type|\n    context storage_type do\n    it \"can be duplicated\" do\n        n = NMatrix.new([2,3], 1.1, stype: storage_type, dtype: :float64)\n        # FIXME\n        pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby? #and storage_type != :dense\n        expect(n.stype).to eq(storage_type)\n\n        n[0,0] = 0.0\n        n[0,1] = 0.1\n        n[1,0] = 1.0\n\n        m = n.dup\n        expect(m.shape).to eq(n.shape)\n        expect(m.dim).to eq(n.dim)\n        expect(m.object_id).not_to eq(n.object_id)\n        expect(m.stype).to eq(storage_type)\n        expect(m[0,0]).to eq(n[0,0])\n        m[0,0] = 3.0\n        expect(m[0,0]).not_to eq(n[0,0])\n      end\n\n      it \"enforces shape boundaries\" do\n        expect { NMatrix.new([1,10], 0, dtype: :int8, stype: storage_type, default: 0)[1,0]  }.to raise_error(RangeError)\n        expect { NMatrix.new([1,10], 0, dtype: :int8, stype: storage_type, default: 0)[0,10] }.to raise_error(RangeError)\n      end\n\n      it \"sets and gets\" do\n        n = NMatrix.new(2, 0, stype: storage_type, dtype: :int8)\n        n[0,1] = 1\n        expect(n[0,0]).to eq(0)\n        expect(n[1,0]).to eq(0)\n        expect(n[0,1]).to eq(1)\n        expect(n[1,1]).to eq(0)\n      end\n\n      it \"sets and gets references\" do\n        n = NMatrix.new(2, stype: storage_type, dtype: :int8, default: 0)\n        expect(n[0,1] = 1).to eq(1)\n        
expect(n[0,1]).to eq(1)\n      end\n\n      # Tests Ruby object versus any C dtype (in this case we use :int64)\n      [:object, :int64].each do |dtype|\n        c = dtype == :object ? \"Ruby object\" : \"non-Ruby object\"\n        context c do\n          it \"allows iteration of matrices\" do\n            n = nil\n            if storage_type == :dense\n              n = NMatrix.new(:dense, [3,3], [1,2,3,4,5,6,7,8,9], dtype)\n            else\n              n = NMatrix.new([3,4], 0, stype: storage_type, dtype: dtype)\n              n[0,0] = 1\n              n[0,1] = 2\n              n[2,3] = 4\n              n[2,0] = 3\n            end\n\n            ary = []\n            n.each do |x|\n              ary << x\n            end\n\n            if storage_type == :dense\n              expect(ary).to eq([1,2,3,4,5,6,7,8,9])\n            else\n              expect(ary).to eq([1,2,0,0,0,0,0,0,3,0,0,4])\n            end\n          end\n\n          it \"allows storage-based iteration of matrices\" do\n            pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby? 
and storage_type != :dense\n            STDERR.puts storage_type.inspect\n            STDERR.puts dtype.inspect\n            n = NMatrix.new([3,3], 0, stype: storage_type, dtype: dtype)\n            n[0,0] = 1\n            n[0,1] = 2\n            n[2,0] = 5 if storage_type == :yale\n            n[2,1] = 4\n            n[2,2] = 3\n\n            values = []\n            is = []\n            js = []\n\n            n.each_stored_with_indices do |v,i,j|\n              values << v\n              is << i\n              js << j\n            end\n\n            if storage_type == :yale\n              expect(is).to     eq([0,1,2,0,2,2])\n              expect(js).to     eq([0,1,2,1,0,1])\n              expect(values).to eq([1,0,3,2,5,4])\n            elsif storage_type == :list\n              expect(values).to eq([1,2,4,3])\n              expect(is).to     eq([0,0,2,2])\n              expect(js).to     eq([0,1,1,2])\n            elsif storage_type == :dense\n              expect(values).to eq([1,2,0,0,0,0,0,4,3])\n              expect(is).to     eq([0,0,0,1,1,1,2,2,2])\n              expect(js).to     eq([0,1,2,0,1,2,0,1,2])\n            end\n          end\n        end\n      end\n    end\n\n    # dense and list, not yale\n    context \"(storage: #{storage_type})\" do\n      it \"gets default value\" do\n        pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n        expect(NMatrix.new(3, 0, stype: storage_type)[1,1]).to eq(0)\n        expect(NMatrix.new(3, 0.1, stype: storage_type)[1,1]).to eq(0.1)\n        expect(NMatrix.new(3, 1, stype: storage_type)[1,1]).to eq(1)\n\n      end\n      it \"returns shape and dim\" do\n        expect(NMatrix.new([3,2,8], 0, stype: storage_type).shape).to eq([3,2,8])\n        expect(NMatrix.new([3,2,8], 0, stype: storage_type).dim).to eq(3)\n      end\n\n      it \"returns number of rows and columns\" do\n        expect(NMatrix.new([7, 4], 3, stype: storage_type).rows).to eq(7)\n        expect(NMatrix.new([7, 
4], 3, stype: storage_type).cols).to eq(4)\n      end\n    end unless storage_type == :yale\n  end\n\n\n  it \"handles dense construction\" do\n    expect(NMatrix.new(3,0)[1,1]).to eq(0)\n    expect(lambda { NMatrix.new(3,dtype: :int8)[1,1] }).to_not raise_error\n  end\n\n  it \"converts from list to yale properly\" do\n    m = NMatrix.new(3, 0, stype: :list)\n    m[0,2] = 333\n    m[2,2] = 777\n    n = m.cast(:yale, :int32)\n    #puts n.capacity\n    #n.extend NMatrix::YaleFunctions\n    #puts n.yale_ija.inspect\n    #puts n.yale_a.inspect\n\n    expect(n[0,0]).to eq(0)\n    expect(n[0,1]).to eq(0)\n    expect(n[0,2]).to eq(333)\n    expect(n[1,0]).to eq(0)\n    expect(n[1,1]).to eq(0)\n    expect(n[1,2]).to eq(0)\n    expect(n[2,0]).to eq(0)\n    expect(n[2,1]).to eq(0)\n    expect(n[2,2]).to eq(777)\n  end\n\n  it \"should return an enumerator when each is called without a block\" do\n    a = NMatrix.new(2, 1)\n    b = NMatrix.new(2, [-1,0,1,0])\n    enums = [a.each, b.each]\n\n    begin\n      atans = []\n      atans << Math.atan2(*enums.map(&:next)) while true\n    rescue StopIteration\n    end\n  end\n\n  context \"dense\" do\n    it \"should return the matrix being iterated over when each is called with a block\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      a = NMatrix.new(2, 1)\n      val = (a.each { })\n      expect(val).to eq(a)\n    end\n\n    it \"should return the matrix being iterated over when each_stored_with_indices is called with a block\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      a = NMatrix.new(2,1)\n      val = (a.each_stored_with_indices { })\n      expect(val).to eq(a)\n    end\n  end\n\n  [:list, :yale].each do |storage_type|\n    context storage_type do\n      it \"should return the matrix being iterated over when each_stored_with_indices is called with a block\" do\n        pending(\"not yet implemented for Complex dtype for NMatrix-JRuby\") 
if jruby?\n        n = NMatrix.new([2,3], 1.1, stype: storage_type, dtype: :float64, default: 0)\n        val = (n.each_stored_with_indices { })\n        expect(val).to eq(n)\n      end\n\n      it \"should return an enumerator when each_stored_with_indices is called without a block\" do\n        pending(\"not yet implemented for Complex dtype for NMatrix-JRuby\") if jruby?\n        n = NMatrix.new([2,3], 1.1, stype: storage_type, dtype: :float64, default: 0)\n        val = n.each_stored_with_indices\n        expect(val).to be_a Enumerator\n      end\n    end\n  end\n\n  it \"should iterate through element 256 without a segfault\" do\n    t = NVector.random(256)\n    t.each { |x| x + 0 }\n  end\nend\n\n\ndescribe 'NMatrix' do\n  context \"#upper_triangle\" do\n    it \"should create a copy with the lower corner set to zero\" do\n      n = NMatrix.seq(4)+1\n      expect(n.upper_triangle).to eq(NMatrix.new(4, [1,2,3,4,0,6,7,8,0,0,11,12,0,0,0,16]))\n      expect(n.upper_triangle(2)).to eq(NMatrix.new(4, [1,2,3,4,5,6,7,8,9,10,11,12,0,14,15,16]))\n    end\n  end\n\n  context \"#lower_triangle\" do\n    it \"should create a copy with the upper corner set to zero\" do\n      n = NMatrix.seq(4)+1\n      expect(n.lower_triangle).to eq(NMatrix.new(4, [1,0,0,0,5,6,0,0,9,10,11,0,13,14,15,16]))\n      expect(n.lower_triangle(2)).to eq(NMatrix.new(4, [1,2,3,0,5,6,7,8,9,10,11,12,13,14,15,16]))\n    end\n  end\n\n  context \"#upper_triangle!\" do\n    it \"should set the lower corner to zero in place\" do\n      n = NMatrix.seq(4)+1\n      expect(n.upper_triangle!).to eq(NMatrix.new(4, [1,2,3,4,0,6,7,8,0,0,11,12,0,0,0,16]))\n      n = NMatrix.seq(4)+1\n      expect(n.upper_triangle!(2)).to eq(NMatrix.new(4, [1,2,3,4,5,6,7,8,9,10,11,12,0,14,15,16]))\n    end\n  end\n\n  context \"#lower_triangle!\" do\n    it \"should set the upper corner to zero in place\" do\n      n = NMatrix.seq(4)+1\n      expect(n.lower_triangle!).to eq(NMatrix.new(4, 
[1,0,0,0,5,6,0,0,9,10,11,0,13,14,15,16]))\n      n = NMatrix.seq(4)+1\n      expect(n.lower_triangle!(2)).to eq(NMatrix.new(4, [1,2,3,0,5,6,7,8,9,10,11,12,13,14,15,16]))\n    end\n  end\n\n  context \"#rank\" do\n    it \"should get the rank of a 2-dimensional matrix\" do\n      n = NMatrix.seq([2,3])\n      expect(n.rank(0, 0)).to eq(N[[0,1,2]])\n    end\n\n    it \"should raise an error when the rank is out of bounds\" do\n      n = NMatrix.seq([2,3])\n      expect { n.rank(2, 0) }.to raise_error(RangeError)\n    end\n  end\n\n  context \"#reshape\" do\n    it \"should change the shape of a matrix without the contents changing\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      n = NMatrix.seq(4)+1\n      expect(n.reshape([8,2]).to_flat_array).to eq(n.to_flat_array)\n    end\n\n    it \"should permit a change of dimensionality\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      n = NMatrix.seq(4)+1\n      expect(n.reshape([8,1,2]).to_flat_array).to eq(n.to_flat_array)\n    end\n\n    it \"should prevent a resize\" do\n      n = NMatrix.seq(4)+1\n      expect { n.reshape([5,2]) }.to raise_error(ArgumentError)\n    end\n\n    it \"should do the reshape operation in place\" do\n      n = NMatrix.seq(4)+1\n      expect(n.reshape!([8,2]).eql?(n)).to eq(true) # because n itself changes\n    end\n\n    it \"should do the reshape operation in place, changing dimension\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      n = NMatrix.seq(4)\n      a = n.reshape!([4,2,2])\n      expect(n).to eq(NMatrix.seq([4,2,2]))\n      expect(a).to eq(NMatrix.seq([4,2,2]))\n    end\n\n    it \"reshape and reshape! 
must produce same result\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      n = NMatrix.seq(4)+1\n      a = NMatrix.seq(4)+1\n      expect(n.reshape!([8,2])==a.reshape(8,2)).to eq(true) # because n itself changes\n    end\n\n    it \"should prevent a resize in place\" do\n      n = NMatrix.seq(4)+1\n      expect { n.reshape!([5,2]) }.to raise_error(ArgumentError)\n    end\n  end\n\n  context \"#transpose\" do\n    [:dense, :list, :yale].each do |stype|\n      context(stype) do\n        it \"should transpose a #{stype} matrix (2-dimensional)\" do\n          n = NMatrix.seq(4, stype: stype)\n          expect(n.transpose.to_a.flatten).to eq([0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15])\n        end\n      end\n    end\n\n    [:dense, :list].each do |stype|\n      context(stype) do\n        it \"should transpose a #{stype} matrix (3-dimensional)\" do\n          n = NMatrix.new([4,4,1], [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], stype: stype)\n          expect(n.transpose([2,1,0]).to_flat_array).to eq([0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15])\n          expect(n.transpose([1,0,2]).to_flat_array).to eq([0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15])\n          expect(n.transpose([0,2,1]).to_flat_array).to eq(n.to_flat_array) # for dense, make this reshape!\n        end\n      end\n\n      it \"should just copy a 1-dimensional #{stype} matrix\" do\n        n = NMatrix.new([3], [1,2,3], stype: stype)\n        expect(n.transpose).to eq n\n        expect(n.transpose).not_to be n\n      end\n\n      it \"should check permute argument if supplied for #{stype} matrix\" do\n        n = NMatrix.new([2,2], [1,2,3,4], stype: stype)\n        expect{n.transpose *4 }.to raise_error(ArgumentError)\n        expect{n.transpose [1,1,2] }.to raise_error(ArgumentError)\n      end\n    end\n  end\n\n  context \"#dot_product\" do\n    [:dense].each do |stype| # list storage transpose not yet implemented\n      context(stype) do # yale support only 2-dim matrix\n   
     it \"should work like vector product on a #{stype} (1-dimensional)\" do\n          # FIXME\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          m = NMatrix.new([3], [1,2,3], stype: stype)\n          expect(m.dot(m)).to eq (NMatrix.new([1],[14]))\n        end\n      end\n    end\n  end\n\n  context \"#==\" do\n    [:dense, :list, :yale].each do |left|\n      [:dense, :list, :yale].each do |right|\n        context (\"#{left}?#{right}\") do\n          it \"tests equality of two equal matrices\" do\n            n = NMatrix.new([3,4], [0,0,1,2,0,0,3,4,0,0,0,0], stype: left)\n            m = NMatrix.new([3,4], [0,0,1,2,0,0,3,4,0,0,0,0], stype: right)\n\n            expect(n==m).to eq(true)\n          end\n\n          it \"tests equality of two unequal matrices\" do\n            n = NMatrix.new([3,4], [0,0,1,2,0,0,3,4,0,0,0,1], stype: left)\n            m = NMatrix.new([3,4], [0,0,1,2,0,0,3,4,0,0,0,0], stype: right)\n\n            expect(n==m).to eq(false)\n          end\n\n          it \"tests equality of matrices with different shapes\" do\n            n = NMatrix.new([2,2], [1,2, 3,4], stype: left)\n            m = NMatrix.new([2,3], [1,2, 3,4, 5,6], stype: right)\n            x = NMatrix.new([1,4], [1,2, 3,4], stype: right)\n\n            expect{n==m}.to raise_error(ShapeError)\n            expect{n==x}.to raise_error(ShapeError)\n          end\n\n          it \"tests equality of matrices with different dimension\" do\n            n = NMatrix.new([2,1], [1,2], stype: left)\n            m = NMatrix.new([2], [1,2], stype: right)\n\n            expect{n==m}.to raise_error(ShapeError)\n          end if left != :yale && right != :yale # yale must have dimension 2\n        end\n      end\n    end\n  end\n\n  context \"#concat\" do\n    it \"should default to horizontal concatenation\" do\n      n = NMatrix.new([1,3], [1,2,3])\n      expect(n.concat(n)).to eq(NMatrix.new([1,6], [1,2,3,1,2,3]))\n    end\n\n    it \"should permit vertical 
concatenation\" do\n      n = NMatrix.new([1,3], [1,2,3])\n      expect(n.vconcat(n)).to eq(NMatrix.new([2,3], [1,2,3]))\n    end\n\n    it \"should permit depth concatenation on tensors\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      n = NMatrix.new([1,3,1], [1,2,3])\n      expect(n.dconcat(n)).to eq(NMatrix.new([1,3,2], [1,1,2,2,3,3]))\n    end\n\n    it \"should work on matrices with different size along concat dim\" do\n      n = N[[1, 2, 3],\n            [4, 5, 6]]\n      m = N[[7],\n            [8]]\n\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      expect(n.hconcat(m)).to eq N[[1, 2, 3, 7], [4, 5, 6, 8]]\n      expect(m.hconcat(n)).to eq N[[7, 1, 2, 3], [8, 4, 5, 6]]\n    end\n\n    it \"should work on matrices with different size along concat dim\" do\n      n = N[[1, 2, 3],\n            [4, 5, 6]]\n\n      m = N[[7, 8, 9]]\n\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      expect(n.vconcat(m)).to eq N[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n      expect(m.vconcat(n)).to eq N[[7, 8, 9], [1, 2, 3], [4, 5, 6]]\n    end\n  end\n\n  context \"#[]\" do\n    it \"should return values based on indices\" do\n      n = NMatrix.new([2,5], [1,2,3,4,5,6,7,8,9,0])\n      expect(n[1,0]).to eq 6\n      expect(n[1,0..3]).to eq NMatrix.new([1,4],[6,7,8,9])\n    end\n\n    it \"should work for negative indices\" do\n      n = NMatrix.new([1,5], [1,2,3,4,5])\n      expect(n[-1]).to eq(5)\n      expect(n[0,0..-2]).to eq(NMatrix.new([1,4],[1,2,3,4]))\n    end\n  end\n\n  context \"#complex_conjugate!\" do\n    [:dense, :yale, :list].each do |stype|\n      context(stype) do\n        it \"should work in-place for complex dtypes\" do\n          pending(\"not yet implemented for list stype\") if stype == :list\n          pending(\"not yet implemented for Complex dtype for NMatrix-JRuby\") if jruby?\n          n = NMatrix.new([2,3], [Complex(2,3)], stype: stype, 
dtype: :complex128)\n          n.complex_conjugate!\n          expect(n).to eq(NMatrix.new([2,3], [Complex(2,-3)], stype: stype, dtype: :complex128))\n        end\n\n        [:object, :int64].each do |dtype|\n          it \"should work in-place for non-complex dtypes\" do\n            pending(\"not yet implemented for list stype\") if stype == :list\n            pending(\"not yet implemented for Complex dtype for NMatrix-JRuby\") if jruby?\n            n = NMatrix.new([2,3], 1, stype: stype, dtype: dtype)\n            n.complex_conjugate!\n            expect(n).to eq(NMatrix.new([2,3], [1], stype: stype, dtype: dtype))\n          end\n        end\n      end\n    end\n  end\n\n  context \"#complex_conjugate\" do\n    [:dense, :yale, :list].each do |stype|\n      context(stype) do\n        it \"should work out-of-place for complex dtypes\" do\n          pending(\"not yet implemented for list stype\") if stype == :list\n          pending(\"not yet implemented for Complex dtype for NMatrix-JRuby\") if jruby?\n          n = NMatrix.new([2,3], [Complex(2,3)], stype: stype, dtype: :complex128)\n          expect(n.complex_conjugate).to eq(NMatrix.new([2,3], [Complex(2,-3)], stype: stype, dtype: :complex128))\n        end\n\n        [:object, :int64].each do |dtype|\n          it \"should work out-of-place for non-complex dtypes\" do\n            pending(\"not yet implemented for list stype\") if stype == :list\n            pending(\"not yet implemented for Complex dtype for NMatrix-JRuby\") if jruby?\n            n = NMatrix.new([2,3], 1, stype: stype, dtype: dtype)\n            expect(n.complex_conjugate).to eq(NMatrix.new([2,3], [1], stype: stype, dtype: dtype))\n          end\n        end\n      end\n    end\n  end\n\n  context \"#inject\" do\n    it \"should sum columns of yale matrix correctly\" do\n      n = NMatrix.new([4, 3], stype: :yale, default: 0)\n      n[0,0] = 1\n      n[1,1] = 2\n      n[2,2] = 4\n      n[3,2] = 8\n      column_sums = []\n      n.cols.times 
do |i|\n        column_sums << n.col(i).inject(:+)\n      end\n      expect(column_sums).to eq([1, 2, 12])\n    end\n  end\n\n  context \"#index\" do\n    it \"returns index of first occurrence of an element for a vector\" do\n      n = NMatrix.new([5], [0,22,22,11,11])\n\n      expect(n.index(22)).to eq([1])\n    end\n\n    it \"returns index of first occurrence of an element for 2-D matrix\" do\n      n = NMatrix.new([3,3], [23,11,23,\n                              44, 2, 0,\n                              33, 0, 32])\n\n      expect(n.index(0)).to eq([1,2])\n    end\n\n    it \"returns index of first occurrence of an element for N-D matrix\" do\n      n = NMatrix.new([3,3,3], [23,11,23, 44, 2, 0, 33, 0, 32,\n                                23,11,23, 44, 2, 0, 33, 0, 32,\n                                23,11,23, 44, 2, 0, 33, 0, 32])\n\n      expect(n.index(44)).to eq([0,1,0])\n    end\n  end\n\n  context \"#last\" do\n    it \"returns the last element of a 1-dimensional NMatrix\" do\n      n = NMatrix.new([1,4], [1,2,3,4])\n      expect(n.last).to eq(4)\n    end\n\n    it \"returns the last element of a 2-dimensional NMatrix\" do\n      n = NMatrix.new([2,2], [4,8,12,16])\n      expect(n.last).to eq(16)\n    end\n\n    it \"returns the last element of a 3-dimensional NMatrix\" do\n      n = NMatrix.new([2,2,2], [1,2,3,4,5,6,7,8])\n      expect(n.last).to eq(8)\n    end\n  end\n\n  context \"#diagonal\" do\n    ALL_DTYPES.each do |dtype|\n      before do\n        @square_matrix =  NMatrix.new([3,3], [\n          23,11,23,\n          44, 2, 0,\n          33, 0, 32\n          ], dtype: dtype\n        )\n\n        @rect_matrix = NMatrix.new([4,3], [\n          23,11,23,\n          44, 2, 0,\n          33, 0,32,\n          11,22,33\n          ], dtype: dtype\n        )\n      end\n\n      it \"returns main diagonal for square matrix\" do\n        expect(@square_matrix.diagonal).to eq(NMatrix.new [3], [23,2,32])\n      end\n\n      it \"returns main diagonal for 
rectangular matrix\" do\n        expect(@rect_matrix.diagonal).to eq(NMatrix.new [3], [23,2,32])\n      end\n\n      it \"returns anti-diagonal for square matrix\" do\n        expect(@square_matrix.diagonal(false)).to eq(NMatrix.new [3], [23,2,33])\n      end\n\n      it \"returns anti-diagonal for rectangular matrix\" do\n        expect(@square_matrix.diagonal(false)).to eq(NMatrix.new [3], [23,2,33])\n      end\n    end\n  end\n\n  context \"#repeat\" do\n    before do\n      @sample_matrix = NMatrix.new([2, 2], [1, 2, 3, 4])\n    end\n\n    it \"checks count argument\" do\n      expect{@sample_matrix.repeat(1, 0)}.to raise_error(ArgumentError)\n      expect{@sample_matrix.repeat(-2, 0)}.to raise_error(ArgumentError)\n    end\n\n    it \"returns repeated matrix\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(@sample_matrix.repeat(2, 0)).to eq(NMatrix.new([4, 2], [1, 2, 3, 4, 1, 2, 3, 4]))\n      expect(@sample_matrix.repeat(2, 1)).to eq(NMatrix.new([2, 4], [1, 2, 1, 2, 3, 4, 3, 4]))\n    end\n\n    it \"preserves dtype\" do\n      # FIXME\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      expect(@sample_matrix.repeat(2, 0).dtype).to eq(@sample_matrix.dtype)\n      expect(@sample_matrix.repeat(2, 1).dtype).to eq(@sample_matrix.dtype)\n    end\n  end\n\n  context \"#meshgrid\" do\n    before do\n      @x, @y, @z = [1, 2, 3], NMatrix.new([2, 1], [4, 5]), [6, 7]\n      @two_dim = NMatrix.new([2, 2], [1, 2, 3, 4])\n      @two_dim_array = [[4], [5]]\n      @expected_result = [NMatrix.new([2, 3], [1, 2, 3, 1, 2, 3]), NMatrix.new([2, 3], [4, 4, 4, 5, 5, 5])]\n      @expected_for_ij = [NMatrix.new([3, 2], [1, 1, 2, 2, 3, 3]), NMatrix.new([3, 2], [4, 5, 4, 5, 4, 5])]\n      @expected_for_sparse = [NMatrix.new([1, 3], [1, 2, 3]), NMatrix.new([2, 1], [4, 5])]\n      @expected_for_sparse_ij = [NMatrix.new([3, 1], [1, 2, 3]), NMatrix.new([1, 2], [4, 5])]\n      # FIXME\n      @expected_3dim = [NMatrix.new([1, 3, 
1], [1, 2, 3]).repeat(2, 0).repeat(2, 2),\n                        NMatrix.new([2, 1, 1], [4, 5]).repeat(3, 1).repeat(2, 2),\n                        NMatrix.new([1, 1, 2], [6, 7]).repeat(2, 0).repeat(3, 1)] unless jruby?\n      @expected_3dim_sparse_ij = [NMatrix.new([3, 1, 1], [1, 2, 3]),\n                                  NMatrix.new([1, 2, 1], [4, 5]),\n                                  NMatrix.new([1, 1, 2], [6, 7])]\n    end\n\n    it \"checks arrays count\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect{NMatrix.meshgrid([@x])}.to raise_error(ArgumentError)\n      expect{NMatrix.meshgrid([])}.to raise_error(ArgumentError)\n    end\n\n    it \"flattens input arrays before use\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(NMatrix.meshgrid([@two_dim, @two_dim_array])).to eq(NMatrix.meshgrid([@two_dim.to_flat_array, @two_dim_array.flatten]))\n    end\n\n    it \"returns new NMatrixes\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(NMatrix.meshgrid([@x, @y])).to eq(@expected_result)\n    end\n\n    it \"has option :sparse\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(NMatrix.meshgrid([@x, @y], sparse: true)).to eq(@expected_for_sparse)\n    end\n\n    it \"has option :indexing\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(NMatrix.meshgrid([@x, @y], indexing: :ij)).to eq(@expected_for_ij)\n      expect(NMatrix.meshgrid([@x, @y], indexing: :xy)).to eq(@expected_result)\n      expect{NMatrix.meshgrid([@x, @y], indexing: :not_ij_not_xy)}.to raise_error(ArgumentError)\n    end\n\n    it \"works well with both options set\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(NMatrix.meshgrid([@x, @y], sparse: true, indexing: :ij)).to eq(@expected_for_sparse_ij)\n    end\n\n    it \"is able to take more than two arrays as arguments and works 
well with options\" do\n      pending(\"Not yet implemented for NMatrix JRuby\") if jruby?\n      expect(NMatrix.meshgrid([@x, @y, @z])).to eq(@expected_3dim)\n      expect(NMatrix.meshgrid([@x, @y, @z], sparse: true, indexing: :ij)).to eq(@expected_3dim_sparse_ij)\n    end\n  end\nend\n"
  },
  {
    "path": "spec/01_enum_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == 01_enum_spec.rb\n#\n# Enumerator tests for NMatrix. These should load early, as they\n# test functionality essential to matrix printing.\n#\nrequire 'spec_helper'\n\ndescribe \"NMatrix enumeration for\" do\n  [:dense, :yale, :list].each do |stype|\n    context stype do\n      let(:n) { create_rectangular_matrix(stype) }\n      let(:m) { n[1..4,1..3] }\n\n      if stype == :yale\n        it \"should iterate properly along each row of a slice\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vv = []\n          ii = []\n          jj = []\n          m.extend NMatrix::YaleFunctions\n          m.each_row do |row|\n            row.each_with_indices do |v,i,j|\n              vv << v\n              ii << i\n              jj << j\n            end\n          end\n\n          expect(vv).to eq([7,8,9, 12,13,0, 0,0,0, 0,17,18])\n          expect(ii).to eq([0]*12)\n          expect(jj).to eq([0,1,2]*4)\n        end\n\n        it \"should iterate along diagonal portion of A array\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vv = []\n          ii = []\n          jj = []\n          n.send :__yale_stored_diagonal_each_with_indices__ do |v,i,j|\n            vv << v\n            ii << i\n  
          jj << j\n          end\n          expect(vv).to eq([1,7,13,0,19])\n          expect(ii).to eq([0,1,2,3,4])\n          expect(jj).to eq(ii)\n        end\n\n        it \"should iterate along non-diagonal portion of A array\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vv = []\n          ii = []\n          jj = []\n          n.send :__yale_stored_nondiagonal_each_with_indices__ do |v,i,j|\n            vv << v\n            ii << i\n            jj << j\n          end\n\n          expect(vv).to eq([2,3,4,5,  6,8,9,10,  11,12,14,15,  16,17,18,20])\n          expect(ii).to eq([[0]*4, [1]*4, [2]*4, [4]*4].flatten)\n          expect(jj).to eq([1,2,3,4,  0,2,3,5,   0,1,4,5,      0,2,3,5])\n        end\n\n        it \"should iterate along a sliced diagonal portion of an A array\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          m = n[0..3,1..3]\n          vv = []\n          ii = []\n          jj = []\n          m.send :__yale_stored_diagonal_each_with_indices__ do |v,i,j|\n            vv << v\n            ii << i\n            jj << j\n          end\n          expect(vv).to eq([7,13,0])\n          expect(ii).to eq([1,2,3])\n          expect(jj).to eq([0,1,2])\n        end\n\n        it \"should iterate along a sliced non-diagonal portion of a sliced A array\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vv = []\n          ii = []\n          jj = []\n          n.extend NMatrix::YaleFunctions\n          m.extend NMatrix::YaleFunctions\n          m.send :__yale_stored_nondiagonal_each_with_indices__ do |v,i,j|\n            vv << v\n            ii << i\n            jj << j\n          end\n\n          expect(ii).to eq([0,0, 1,   3,3 ])\n          expect(jj).to eq([1,2, 0,   1,2 ])\n          expect(vv).to eq([8,9, 12, 17,18])\n        end\n\n        it \"should visit each stored element of the 
matrix in order by indices\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vv = []\n          ii = []\n          jj = []\n          n.each_ordered_stored_with_indices do |v,i,j|\n            vv << v\n            ii << i\n            jj << j\n          end\n\n          expect(vv).to eq([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 16, 17, 18, 19, 20])\n          expect(ii).to eq([[0]*5, [1]*5, [2]*5, [3]*1, [4]*5].flatten)\n          expect(jj).to eq([0,1,2,3,4,  0,1,2,3,5,  0,1,2,4,5,  3,  0,2,3,4,5])\n        end\n\n        it \"should visit each stored element of the slice in order by indices\" do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vv = []\n          ii = []\n          jj = []\n          m.each_ordered_stored_with_indices do |v,i,j|\n            vv << v\n            ii << i\n            jj << j\n          end\n          expect(ii).to eq([0,0,0, 1,1,   2,  3,3  ])\n          expect(jj).to eq([0,1,2, 0,1,   2,  1,2  ])\n          expect(vv).to eq([7,8,9, 12,13, 0, 17,18 ])\n        end\n      end\n\n      it \"should visit each cell in the matrix as if dense, making indices available\" do\n        vv = []\n        ii = []\n        jj = []\n        n.each_with_indices do |v,i,j|\n          vv << v\n          ii << i\n          jj << j\n        end\n\n        expect(vv).to eq([1,2,3,4,5,0,6,7,8,9,0,10,11,12,13,0,14,15,0,0,0,0,0,0,16,0,17,18,19,20])\n        expect(ii).to eq([[0]*6, [1]*6, [2]*6, [3]*6, [4]*6].flatten)\n        expect(jj).to eq([0,1,2,3,4,5]*5)\n      end\n\n      it \"should visit each cell in the slice as if dense, making indices available\" do\n        vv = []\n        ii = []\n        jj = []\n        m.each_with_indices do |v,i,j|\n          vv << v\n          ii << i\n          jj << j\n        end\n        expect(jj).to eq([0,1,2]*4)\n        expect(ii).to eq([[0]*3, [1]*3, [2]*3, [3]*3].flatten)\n        
expect(vv).to eq([7,8,9,12,13,0,0,0,0,0,17,18])\n\n      end\n\n      if stype == :list or stype == :dense then\n        it \"should correctly map to a matrix with a single element\" do\n          nm = N.new([1], [2.0], stype: stype)\n          expect(nm.map { |e| e**2 }).to eq N.new([1], [4.0], stype: stype)\n        end\n\n        it \"should correctly map to a matrix with multiple elements\" do\n          nm = N.new([2], [2.0, 2.0], stype: stype)\n          expect(nm.map { |e| e**2 }).to eq N.new([2], [4.0, 4.0], stype: stype)\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/02_slice_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == 02_slice_spec.rb\n#\n# Test of slice operations. High priority tests since reference\n# slicing is needed for pretty_print.\n#\nrequire 'spec_helper'\n\ndescribe \"Slice operation\" do\n  include RSpec::Longrun::DSL\n\n  [:dense, :list, :yale].each do |stype|\n    context \"for #{stype}\" do\n        #GC.start # don't have to do this, but it helps to make sure we've cleaned up our pointers properly.\n      let(:stype_matrix) { create_matrix(stype) }\n\n      it \"should correctly return a row of a reference-slice\" do\n        n = create_rectangular_matrix(stype)\n        stype_matrix = n[1..4,1..3]\n        expect(stype_matrix.row(1, :copy)).to eq(stype_matrix.row(1, :reference))\n        expect(stype_matrix.row(1, :copy).to_flat_array).to eq([12,13,0])\n      end\n\n      if stype == :yale\n        it \"should binary search for the left boundary of a partial row of stored indices correctly\" do\n          #FIXME\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          n = NMatrix.new(10, stype: :yale, dtype: :int32)\n          n[3,0] = 1\n          #n[3,2] = 2\n          n[3,3] = 3\n          n[3,4] = 4\n          n[3,6] = 5\n          n[3,8] = 6\n          n[3,9] = 7\n          vs = []\n          is = []\n          js = []\n\n          
n[3,1..9].each_stored_with_indices do |v,i,j|\n            vs << v\n            is << i\n            js << j\n          end\n\n          expect(vs).to eq([3,4,5,6,7])\n          expect(js).to eq([2,3,5,7,8])\n          expect(is).to eq([0,0,0,0,0])\n        end\n      elsif stype == :list\n        it \"should iterate across a partial row of stored indices\" do\n          vs = []\n          is = []\n          js = []\n\n          STDERR.puts(\"now\") if stype == :yale\n          stype_matrix[2,1..2].each_stored_with_indices do |v,i,j|\n            vs << v\n            is << i\n            js << j\n          end\n\n          expect(vs).to eq([7,8])\n          expect(is).to eq([0,0])\n          expect(js).to eq([0,1])\n        end\n      end\n\n      unless stype == :dense\n        it \"should iterate across a row of stored indices\" do\n          #FIXME\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vs = []\n          is = []\n          js = []\n          stype_matrix[2,0..2].each_stored_with_indices do |v,i,j|\n            vs << v\n            is << i\n            js << j\n          end\n          expect(vs).to eq(stype == :yale ? [8,6,7] : [6,7,8])\n          expect(is).to eq([0,0,0])\n          expect(js).to eq(stype == :yale ? [2,0,1] : [0,1,2])\n        end\n\n        it \"should iterate across a submatrix of stored indices\" do\n          #FIXME\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          vs = []\n          is = []\n          js = []\n          stype_matrix[0..1,1..2].each_stored_with_indices do |v,i,j|\n            vs << v\n            is << i\n            js << j\n          end\n\n          expect(vs).to eq(stype == :yale ? [4,1,2,5] : [1,2,4,5])\n          expect(is).to eq(stype == :yale ? [1,0,0,1] : [0,0,1,1])\n          expect(js).to eq(stype == :yale ? 
[0,0,1,1] : [0,1,0,1])\n        end\n      end\n\n      it \"should return correct supershape\" do\n        pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n        x = NMatrix.random([10,12])\n        y = x[0...8,5...12]\n        expect(y.shape).to eq([8,7])\n        expect(y.supershape).to eq([10,12])\n      end\n\n      it \"should have #is_ref? method\" do\n        pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n        a = stype_matrix[0..1, 0..1]\n        b = stype_matrix.slice(0..1, 0..1)\n        expect(stype_matrix.is_ref?).to be false\n        expect(a.is_ref?).to be true\n        expect(b.is_ref?).to be false\n      end\n\n      it \"reference should compare with non-reference\" do\n        expect(stype_matrix.slice(1..2,0..1)).to eq(stype_matrix[1..2, 0..1])\n        expect(stype_matrix[1..2,0..1]).to eq(stype_matrix.slice(1..2, 0..1))\n        expect(stype_matrix[1..2,0..1]).to eq(stype_matrix[1..2, 0..1])\n      end\n\n      context \"with copying\" do\n        it 'should return an NMatrix' do\n          n = stype_matrix.slice(0..1,0..1)\n          expect(nm_eql(n, NMatrix.new([2,2], [0,1,3,4], dtype: :int32))).to be true\n        end\n\n        it 'should return a copy of 2x2 matrix to self elements' do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          n = stype_matrix.slice(1..2,0..1)\n          expect(n.shape).to eql([2,2])\n\n          expect(n[1,1]).to eq(stype_matrix[2,1])\n          n[1,1] = -9\n          expect(stype_matrix[2,1]).to eql(7)\n        end\n\n        it 'should return a 1x2 matrix without refs to self elements' do\n          #FIXME\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          n = stype_matrix.slice(0,1..2)\n          expect(n.shape).to eql([1,2])\n\n          expect(n[0]).to eq(stype_matrix[0,1])\n          expect(n[1]).to eq(stype_matrix[0,2])\n          
n[0] = -9\n          expect(stype_matrix[0,1]).to eql(1)\n          expect(stype_matrix[0,2]).to eql(2)\n        end\n\n        it 'should return a 2x1 matrix without refs to self elements' do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          stype_matrix.extend NMatrix::YaleFunctions\n\n          n = stype_matrix.slice(0..1,1)\n          expect(n.shape).to eql([2,1])\n\n          expect(n[0]).to eq(stype_matrix[0,1])\n          expect(n[1]).to eq(stype_matrix[1,1])\n          n[0] = -9\n          expect(stype_matrix[0,1]).to eql(1)\n          expect(stype_matrix[1,1]).to eql(4)\n        end\n\n        it 'should be correct slice for range 0..2 and 0...3' do\n          expect(stype_matrix.slice(0..2,0..2)).to eq(stype_matrix.slice(0...3,0...3))\n        end\n\n        [:dense, :list, :yale].each do |cast_type|\n          it \"should cast copied slice from #{stype.upcase} to #{cast_type.upcase}\" do\n            expect(nm_eql(stype_matrix.slice(1..2, 1..2).cast(cast_type, :int32), stype_matrix.slice(1..2,1..2))).to be true\n            expect(nm_eql(stype_matrix.slice(0..1, 1..2).cast(cast_type, :int32), stype_matrix.slice(0..1,1..2))).to be true\n            expect(nm_eql(stype_matrix.slice(1..2, 0..1).cast(cast_type, :int32), stype_matrix.slice(1..2,0..1))).to be true\n            expect(nm_eql(stype_matrix.slice(0..1, 0..1).cast(cast_type, :int32), stype_matrix.slice(0..1,0..1))).to be true\n\n            # Non square\n            expect(nm_eql(stype_matrix.slice(0..2, 1..2).cast(cast_type, :int32), stype_matrix.slice(0..2,1..2))).to be true\n            #require 'pry'\n            #binding.pry if cast_type == :yale\n            expect(nm_eql(stype_matrix.slice(1..2, 0..2).cast(cast_type, :int32), stype_matrix.slice(1..2,0..2))).to be true\n\n            # Full\n            expect(nm_eql(stype_matrix.slice(0..2, 0..2).cast(cast_type, :int32), stype_matrix)).to be true\n          end\n        end\n      end\n\n  
    # Yale:\n      #context \"by copy\" do\n        #it \"should correctly preserve zeros\" do\n        #  stype_matrix = NMatrix.new(:yale, 3, :int64)\n        #  column_slice = stype_matrix.column(2, :copy)\n        #  column_slice[0].should == 0\n        #  column_slice[1].should == 0\n        #  column_slice[2].should == 0\n        #end\n      #end\n\n      context \"by reference\" do\n        it 'should return an NMatrix' do\n          n = stype_matrix[0..1,0..1]\n          expect(nm_eql(n, NMatrix.new([2,2], [0,1,3,4], dtype: :int32))).to be true\n        end\n\n        it 'should return a 2x2 matrix with refs to self elements' do\n          #FIXME\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby? # and :cast_type != :dense\n          n = stype_matrix[1..2,0..1]\n          expect(n.shape).to eql([2,2])\n\n          expect(n[0,0]).to eq(stype_matrix[1,0])\n          n[0,0] = -9\n          expect(stype_matrix[1,0]).to eql(-9)\n        end\n\n        it 'should return a 1x2 vector with refs to self elements' do\n          #FIXME\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby? 
# and :cast_type != :dense\n          n = stype_matrix[0,1..2]\n          expect(n.shape).to eql([1,2])\n\n          expect(n[0]).to eq(stype_matrix[0,1])\n          n[0] = -9\n          expect(stype_matrix[0,1]).to eql(-9)\n        end\n\n        it 'should return a 2x1 vector with refs to self elements' do\n          pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n          n = stype_matrix[0..1,1]\n          expect(n.shape).to eql([2,1])\n\n          expect(n[0]).to eq(stype_matrix[0,1])\n          n[0] = -9\n          expect(stype_matrix[0,1]).to eql(-9)\n        end\n\n        it 'should slice again' do\n          n = stype_matrix[1..2, 1..2]\n          expect(nm_eql(n[1,0..1], NVector.new(2, [7,8], dtype: :int32).transpose)).to be true\n        end\n\n        it 'should be correct slice for range 0..2 and 0...3' do\n          expect(stype_matrix[0..2,0..2]).to eq(stype_matrix[0...3,0...3])\n        end\n\n        it 'should correctly handle :* slice notation' do\n          expect(stype_matrix[:*,0]).to eq stype_matrix[0...stype_matrix.shape[0], 0]\n        end\n\n        if stype == :dense\n          [:byte,:int8,:int16,:int32,:int64,:float32,:float64].each do |left_dtype|\n            [:byte,:int8,:int16,:int32,:int64,:float32,:float64].each do |right_dtype|\n\n              # Won't work if they're both 1-byte, due to overflow.\n              next if [:byte,:int8].include?(left_dtype) && [:byte,:int8].include?(right_dtype)\n\n              # For now, don't bother testing int-int mult.\n              #next if [:int8,:int16,:int32,:int64].include?(left_dtype) && [:int8,:int16,:int32,:int64].include?(right_dtype)\n              it \"handles #{left_dtype.to_s} dot #{right_dtype.to_s} matrix multiplication\" do\n                #STDERR.puts \"dtype=#{dtype.to_s}\"\n                #STDERR.puts \"2\"\n\n                nary = if left_dtype.to_s =~ /complex/\n                         COMPLEX_MATRIX43A_ARRAY\n                       
else\n                         MATRIX43A_ARRAY\n                       end\n\n                mary = if right_dtype.to_s =~ /complex/\n                         COMPLEX_MATRIX32A_ARRAY\n                       else\n                         MATRIX32A_ARRAY\n                       end\n\n                n = NMatrix.new([4,3], nary, dtype: left_dtype)[1..3,1..2]\n                m = NMatrix.new([3,2], mary, dtype: right_dtype)[1..2,0..1]\n\n                r = n.dot m\n                expect(r.shape).to eql([3,2])\n\n                expect(r[0,0]).to eq(219.0)\n                expect(r[0,1]).to eq(185.0)\n                expect(r[1,0]).to eq(244.0)\n                expect(r[1,1]).to eq(205.0)\n                expect(r[2,0]).to eq(42.0)\n                expect(r[2,1]).to eq(35.0)\n\n              end\n            end\n          end\n\n          context \"operations\" do\n\n            it \"correctly transposes slices\" do\n              expect(stype_matrix[0...3,0].transpose).to eq NMatrix[[0, 3, 6]]\n              expect(stype_matrix[0...3,1].transpose).to eq NMatrix[[1, 4, 7]]\n              expect(stype_matrix[0...3,2].transpose).to eq NMatrix[[2, 5, 8]]\n              expect(stype_matrix[0,0...3].transpose).to eq NMatrix[[0], [1], [2]]\n              expect(stype_matrix[1,0...3].transpose).to eq NMatrix[[3], [4], [5]]\n              expect(stype_matrix[2,0...3].transpose).to eq NMatrix[[6], [7], [8]]\n              expect(stype_matrix[1..2,1..2].transpose).to eq NMatrix[[4, 7], [5, 8]]\n            end\n\n            it \"adds slices\" do\n              expect(NMatrix[[0,0,0]] + stype_matrix[1,0..2]).to eq NMatrix[[3, 4, 5]]\n            end\n\n            it \"scalar adds to slices\" do\n              expect(stype_matrix[1,0..2]+1).to eq NMatrix[[4, 5, 6]]\n            end\n\n            it \"compares slices to scalars\" do\n              #FIXME\n              pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n              
(stype_matrix[1, 0..2] > 2).each { |e| expect(e != 0).to be true }\n            end\n\n            it \"iterates only over elements in the slice\" do\n              els = []\n              stype_matrix[1, 0..2].each { |e| els << e }\n              expect(els.size).to eq 3\n              expect(els[0]).to eq 3\n              expect(els[1]).to eq 4\n              expect(els[2]).to eq 5\n            end\n\n            it \"iterates with index only over elements in the slice\" do\n              els = []\n              stype_matrix[1, 0..2].each_stored_with_indices { |a| els << a }\n              expect(els.size).to eq 3\n              expect(els[0]).to eq [3, 0, 0]\n              expect(els[1]).to eq [4, 0, 1]\n              expect(els[2]).to eq [5, 0, 2]\n            end\n\n          end\n\n        end\n\n        example 'should be cleaned up by garbage collector without errors'  do\n          step \"reference slice\" do\n            1.times do\n              n = stype_matrix[1..2,0..1]\n            end\n            GC.start\n          end\n\n          step \"reference slice of casted-copy\" do\n            expect(stype_matrix).to eq(NMatrix.new([3,3], (0..9).to_a, dtype: :int32).cast(stype, :int32))\n            n = nil\n            1.times do\n              m = NMatrix.new([2,2], [1,2,3,4]).cast(stype, :int32)\n              n = m[0..1,0..1]\n            end\n            GC.start\n            expect(n).to eq(NMatrix.new([2,2], [1,2,3,4]).cast(stype, :int32))\n          end\n        end\n\n        [:dense, :list, :yale].each do |cast_type|\n          it \"should cast a square reference-slice from #{stype.upcase} to #{cast_type.upcase}\" do\n            expect(nm_eql(stype_matrix[1..2, 1..2].cast(cast_type), stype_matrix[1..2,1..2])).to be true\n            expect(nm_eql(stype_matrix[0..1, 1..2].cast(cast_type), stype_matrix[0..1,1..2])).to be true\n            expect(nm_eql(stype_matrix[1..2, 0..1].cast(cast_type), stype_matrix[1..2,0..1])).to be true\n            
expect(nm_eql(stype_matrix[0..1, 0..1].cast(cast_type), stype_matrix[0..1,0..1])).to be true\n          end\n\n          it \"should cast a rectangular reference-slice from #{stype.upcase} to #{cast_type.upcase}\" do\n            # Non square\n            expect(nm_eql(stype_matrix[0..2, 1..2].cast(cast_type), stype_matrix[0..2,1..2])).to be true # FIXME: memory problem.\n            expect(nm_eql(stype_matrix[1..2, 0..2].cast(cast_type), stype_matrix[1..2,0..2])).to be true # this one is fine\n          end\n\n          it \"should cast a square full-matrix reference-slice from #{stype.upcase} to #{cast_type.upcase}\" do\n            expect(nm_eql(stype_matrix[0..2, 0..2].cast(cast_type), stype_matrix)).to be true\n          end\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/03_nmatrix_monkeys_spec.rb",
    "content": "require 'spec_helper'\n\ndescribe NMatrix do\n  describe \"#to_a\" do\n    it \"creates an Array with the same dimensions\" do\n      n = NMatrix.seq([3,2])\n      expect(n.to_a).to eq([[0, 1], [2, 3], [4, 5]])\n    end\n\n    it \"creates an Array with the proper element type\" do\n      n = NMatrix.seq([3,2], dtype: :float64)\n      expect(n.to_a).to eq([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])\n    end\n\n    it \"properly interprets list matrices\" do\n      n = NMatrix.seq([3,2], stype: :list)\n      expect(n.to_a).to eq([[0, 1], [2, 3], [4, 5]])\n    end\n\n    it \"properly interprets yale matrices\" do\n      n = NMatrix.seq([3,2], stype: :yale)\n      expect(n.to_a).to eq([[0, 1], [2, 3], [4, 5]])\n    end\n  end\nend\n\ndescribe Array do\n  describe \"#to_nm\" do\n    # [0, 1, 2, 3, 4, 5]\n    let(:a) {(0..5).to_a}\n\n    it \"uses a given shape and type\" do\n      expect(a.to_nm([3,2]).dtype).to eq :int64\n      expect(a.to_nm([3,2])).to eq(NMatrix.seq([3,2]))\n    end\n\n    it \"guesses dtype based on first element\" do\n      a[0] = 0.0\n      expect(a.to_nm([3,2]).dtype).to eq :float64\n    end\n\n    it \"defaults to dtype :object if necessary\" do\n      #FIXME\n      pending(\"not yet implemented for object dtype for NMatrix-JRuby\") if jruby?\n      a = %w(this is an array of strings)\n      expect(a.to_nm([3,2]).dtype).to eq :object\n      expect(a.to_nm([3,2])).to eq(NMatrix.new([3,2], a, dtype: :object))\n    end\n\n    it \"attempts to intuit the shape of the Array\" do\n      a = [[0, 1], [2, 3], [4, 5]]\n      expect(a.to_nm).to eq(NMatrix.new([3,2], a.flatten))\n      expect(a.to_nm.dtype).to eq :int64\n    end\n\n    it \"creates an object Array for inconsistent dimensions\" do\n      a = [[0, 1, 2], [3], [4, 5]]\n      expect(a.to_nm).to eq(NMatrix.new([3], a, dtype: :object))\n      expect(a.to_nm.dtype).to eq :object\n    end\n\n    it \"intuits shape of Array into multiple dimensions\" do\n      a = [[[0], [1]], [[2], 
[3]], [[4], [5]]]\n      expect(a.to_nm).to eq(NMatrix.new([3,2,1], a.flatten))\n      expect(a).to eq(a.to_nm.to_a)\n    end\n\n    it \"is reflective with NMatrix#to_a\" do\n      a = [[0, 1, 2], [3], [4, 5]]\n      expect(a).to eq(a.to_nm.to_a)\n    end\n\n    it \"does not permanently alter the Array\" do\n      a = [[0, 1], [2, 3], [4, 5]]\n      expect(a.to_nm).to eq(NMatrix.new([3,2], a.flatten))\n      expect(a).to eq([[0, 1], [2, 3], [4, 5]])\n    end\n  end\nend\n\n"
  },
  {
    "path": "spec/blas_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == blas_spec.rb\n#\n# Tests for properly exposed BLAS functions.\n#\n\nrequire 'spec_helper'\n\ndescribe NMatrix::BLAS do\n  [:byte, :int8, :int16, :int32, :int64,\n   :float32, :float64, :complex64, :complex128,\n   :object\n  ].each do |dtype|\n    context dtype do\n      it \"exposes cblas_scal\" do\n        x = NMatrix.new([3, 1], [1, 2, 3], dtype: dtype)\n        NMatrix::BLAS.cblas_scal(3, 2, x, 1)\n        expect(x).to eq(NMatrix.new([3, 1], [2, 4, 6], dtype: dtype))\n      end\n\n      it \"exposes cblas_imax\" do\n        u = NMatrix.new([3,1], [1, 4, 3], dtype: dtype)\n        index = NMatrix::BLAS.cblas_imax(3, u, 1)\n        expect(index).to eq(1)\n      end\n    end\n  end\n\n  [:float32, :float64, :complex64, :complex128].each do |dtype|\n    context dtype do\n      # This is not the same as \"exposes cblas trsm\", which would be for a version defined in blas.rb (which\n      # would greatly simplify the calling of cblas_trsm in terms of arguments, and which would be accessible\n      # as NMatrix::BLAS::trsm)\n      it \"exposes unfriendly cblas_trsm\" do\n        a     = NMatrix.new(3, [4,-1.0/2, -3.0/4, -2, 2, -1.0/4, -4, -2, -1.0/2], dtype: dtype)\n        b     = NMatrix.new([3,1], [-1, 17, -9], dtype: dtype)\n        NMatrix::BLAS::cblas_trsm(:row, :right, :lower, 
:transpose, :nonunit, 1, 3, 1.0, a, 3, b, 3)\n\n        # These test results all come from actually running a matrix through BLAS. We use them to ensure that NMatrix's\n        # version of these functions give similar results.\n\n        expect(b[0]).to eq(-1.0/4)\n        expect(b[1]).to eq(33.0/4)\n        expect(b[2]).to eq(-13)\n\n        NMatrix::BLAS::cblas_trsm(:row, :right, :upper, :transpose, :unit, 1, 3, 1.0, a, 3, b, 3)\n\n        expect(b[0]).to eq(-15.0/2)\n        expect(b[1]).to eq(5)\n        expect(b[2]).to eq(-13)\n        \n        NMatrix::BLAS::cblas_trsm(:row, :left, :lower, :transpose, :nounit, 3, 1, 1.0, a, 3, b, 1)\n\n        expect(b[0]).to eq(307.0/8)\n        expect(b[1]).to eq(57.0/2)\n        expect(b[2]).to eq(26.0)\n        \n        NMatrix::BLAS::cblas_trsm(:row, :left, :upper, :transpose, :unit, 3, 1, 1.0, a, 3, b, 1)\n\n        expect(b[0]).to eq(307.0/8)\n        expect(b[1]).to eq(763.0/16)\n        expect(b[2]).to eq(4269.0/64)        \n      end\n\n      # trmm multiplies two matrices, where one of the two is required to be\n      # triangular\n      it \"exposes cblas_trmm\" do\n        a = NMatrix.new([3,3], [1,1,1, 0,1,2, 0,0,-1], dtype: dtype)\n        b = NMatrix.new([3,3], [1,2,3, 4,5,6, 7,8,9], dtype: dtype)\n\n        begin\n          NMatrix::BLAS.cblas_trmm(:row, :left, :upper, false, :not_unit, 3, 3, 1, a, 3, b, 3)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        product = NMatrix.new([3,3], [12,15,18, 18,21,24, -7,-8,-9], dtype: dtype)\n        expect(b).to eq(product)\n      end\n    end\n  end\n\n  #should have a separate test for complex\n  [:float32, :float64, :complex64, :complex128, :object].each do |dtype|\n    context dtype do\n\n      it \"exposes cblas rot\" do\n        x = NMatrix.new([5,1], [1,2,3,4,5], dtype: dtype)\n        y = NMatrix.new([5,1], [-5,-4,-3,-2,-1], dtype: dtype)\n        x, y = NMatrix::BLAS::rot(x, y, 1.0/2, Math.sqrt(3)/2, -1)\n\n        
expect(x).to be_within(1e-4).of(\n                   NMatrix.new([5,1], [-0.3660254037844386, -0.7320508075688772, -1.098076211353316, -1.4641016151377544, -1.8301270189221928], dtype: dtype)\n                 )\n\n        expect(y).to be_within(1e-4).of(\n                   NMatrix.new([5,1], [-6.830127018922193, -5.464101615137754, -4.098076211353316, -2.732050807568877, -1.3660254037844386], dtype: dtype)\n                 )\n      end\n\n    end\n  end\n\n  [:float32, :float64, :complex64, :complex128, :object].each do |dtype|\n    context dtype do\n\n      it \"exposes cblas rotg\" do\n        pending(\"broken for :object\") if dtype == :object\n\n        ab = NMatrix.new([2,1], [6,-8], dtype: dtype)\n        begin\n          c,s = NMatrix::BLAS::rotg(ab)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        if [:float32, :float64].include?(dtype)\n          expect(ab[0]).to be_within(1e-6).of(-10)\n          expect(ab[1]).to be_within(1e-6).of(-5.0/3)\n          expect(c).to be_within(1e-6).of(-3.0/5)\n        else\n          pending \"need correct test cases\"\n          expect(ab[0]).to be_within(1e-6).of(10)\n          expect(ab[1]).to be_within(1e-6).of(5.0/3)\n          expect(c).to be_within(1e-6).of(3.0/5)\n        end\n        expect(s).to be_within(1e-6).of(4.0/5)\n      end\n\n      # Note: this exposes gemm, not cblas_gemm (which is the unfriendly CBLAS no-error-checking version)\n      it \"exposes gemm\" do\n        n = NMatrix.new([4,3], [14.0,9.0,3.0, 2.0,11.0,15.0, 0.0,12.0,17.0, 5.0,2.0,3.0], dtype: dtype)\n        m = NMatrix.new([3,2], [12.0,25.0, 9.0,10.0, 8.0,5.0], dtype: dtype)\n\n        #c = NMatrix.new([4,2], dtype)\n        r = NMatrix::BLAS.gemm(n, m) #, c)\n        #c.should equal(r) # check that both are same memory address\n\n        expect(r).to eq(NMatrix.new([4,2], [273,455,243,235,244,205,102,160], dtype: dtype))\n      end\n\n      it \"exposes gemv\" do\n        a = NMatrix.new([4,3], 
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0], dtype: dtype)\n        x = NMatrix.new([3,1], [2.0, 1.0, 0.0], dtype: dtype)\n        y = NMatrix::BLAS.gemv(a, x)\n        expect(y).to eq(NMatrix.new([4,1],[4.0,13.0,22.0,31.0],dtype: dtype))\n      end\n\n      it \"exposes asum\" do\n        pending(\"broken for :object\") if dtype == :object\n\n        x = NMatrix.new([4,1], [-1,2,3,4], dtype: dtype)\n        expect(NMatrix::BLAS.asum(x)).to eq(10)\n      end\n\n      it \"exposes asum for single element\" do\n        if [:complex64,:complex128].include?(dtype)\n          x = NMatrix.new([1], [Complex(-3,2)], dtype: dtype)\n          expect(x.asum).to eq(5.0)\n        else\n          x = NMatrix.new([1], [-1], dtype: dtype)\n          expect(x.asum).to eq(1.0)\n        end\n      end\n\n      it \"exposes nrm2\" do\n        pending(\"broken for :object\") if dtype == :object\n\n        if dtype =~ /complex/\n          x = NMatrix.new([3,1], [Complex(1,2),Complex(3,4),Complex(0,6)], dtype: dtype)\n          y = NMatrix.new([3,1], [Complex(0,0),Complex(0,0),Complex(0,0)], dtype: dtype)\n          nrm2 = 8.12403840463596\n        else\n          x = NMatrix.new([4,1], [2,-4,3,5], dtype: dtype)\n          y = NMatrix.new([3,1], [0,0,0], dtype: dtype)\n          nrm2 = 5.385164807134504\n        end\n        \n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-14\n                else\n                  1e-14\n              end\n\n        expect(NMatrix::BLAS.nrm2(x, 1, 3)).to be_within(err).of(nrm2)\n        expect(NMatrix::BLAS.nrm2(y, 1, 3)).to be_within(err).of(0)\n      end\n\n    end\n  end\nend\n"
  },
  {
    "path": "spec/elementwise_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == nmatrix_spec.rb\n#\n# Element-wise operation tests.\n#\n\nrequire 'spec_helper'\n\ndescribe NMatrix do\n  context \"yale\" do\n    before :each do\n      @n = NMatrix.new(3, stype: :yale, dtype: :int64)\n      @n.extend NMatrix::YaleFunctions unless jruby?\n      @m = NMatrix.new(3, stype: :yale, dtype: :int64)\n      @n[0,0] = 52\n      @n[0,2] = 5\n      @n[1,1] = 40\n      @n[0,1] = 30\n      @n[2,0] = 6\n      @m[1,1] = -48\n      @m[0,2] = -5\n      @n.extend NMatrix::YaleFunctions unless jruby?\n    end\n\n    it \"should perform scalar math\" do\n      pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n      x = @n * 3\n      expect(x[0,0]).to eq(52 * 3)\n      expect(x[0,1]).to eq(30 * 3)\n      expect(x[0,2]).to eq(5 * 3)\n      expect(x[1,1]).to eq(40 * 3)\n      expect(x[2,0]).to eq(6 * 3)\n\n      r = NMatrix.new(3, stype: :yale, dtype: :int64)\n      y = r + 3\n      expect(y[0,0]).to eq(3)\n    end\n\n    it \"should refuse to perform a dot operation on a yale with non-zero default\" do\n      pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n      r = NMatrix.new(3, stype: :yale, dtype: :int64)\n      y = r + 3\n      expect { y.dot(r) }.to raise_error\n      expect { r.dot(y) }.to raise_error\n    end\n\n    
it \"should perform element-wise addition\" do\n      expect(@n+@m).to eq(NMatrix.new(:dense, 3, [52,30,0,0,-8,0,6,0,0], :int64).cast(:yale, :int64))\n    end\n\n    it \"should perform element-wise subtraction\" do\n      expect(@n-@m).to eq(NMatrix.new(:dense, 3, [52,30,10,0,88,0,6,0,0], :int64).cast(:yale, :int64))\n    end\n\n    it \"should perform element-wise multiplication\" do\n      r = NMatrix.new(:dense, 3, [0,0,-25,0,-1920,0,0,0,0], :int64).cast(:yale, :int64)\n      m = NMatrix.new(2, stype: :yale, dtype: :int64)\n      expect(@n*@m).to eq(r)\n    end\n\n    it \"should perform element-wise division\" do\n      pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n      r = NMatrix.new(:dense, 3, [52, 30, -2, 0, -1, 0, 6, 0, 0], :int64).cast(:yale, :int64)\n      expect(@n/(@m+1)).to eq(r)\n    end\n\n    it \"should perform element-wise modulo\" do\n      pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n      m = NMatrix.new(3, stype: :yale, dtype: :int64, default: 0) + 5\n      expect(@n % m).to eq(NMatrix.new(:dense, 3, [2,0,0,0,0,0,1,0,0], :int64).cast(:yale, :int64))\n    end\n\n    it \"should handle element-wise equality (=~)\" do\n      expect(@n =~ @m).to eq(NMatrix.new(:dense, 3, [false,false,false,true,false,true,false,true,true], :object).cast(:yale, :object, false))\n    end\n\n    it \"should handle element-wise inequality (!~)\" do\n      expect(@n !~ @m).to eq(NMatrix.new(:dense, 3, [true,true,true,false,true,false,true,false,false], :object).cast(:yale, :object, true))\n    end\n\n    it \"should handle element-wise less-than (<)\" do\n      expect(@m < @n).to eq(NMatrix.new(:dense, 3, [true,true,true,false,true,false,true,false,false], :object).cast(:yale, :object, true))\n    end\n\n    it \"should handle element-wise greater-than (>)\" do\n      expect(@n > @m).to eq(NMatrix.new(:dense, 3, [true,true,true,false,true,false,true,false,false], :object).cast(:yale, :object, 
false))\n    end\n\n    it \"should handle element-wise greater-than-or-equals (>=)\" do\n      expect(@n >= @m).to eq(NMatrix.new(:dense, 3, true, :object).cast(:yale,:object, true))\n    end\n\n    it \"should handle element-wise less-than-or-equals (<=)\" do\n      r = NMatrix.new(:dense, 3, [false,false,false,true,false,true,false,true,true], :object).cast(:yale, :object, false)\n      expect(@n <= @m).to eq(r)\n    end\n  end\n\n\n  context \"list\" do\n    before :each do\n      @n = NMatrix.new(:list, 2, 0, :int64)\n      @m = NMatrix.new(:list, 2, 0, :int64)\n      @n[0,0] = 52\n      @m[1,1] = -48\n      @n[1,1] = 40\n    end\n\n    it \"should perform scalar math\" do\n      pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") if jruby?\n      x = @n * 3\n      expect(x[0,0]).to eq(52 * 3)\n      expect(x[1,1]).to eq(40 * 3)\n      expect(x[0,1]).to eq(0)\n\n      r = NMatrix.new(3, stype: :list, default: 1)\n      y = r + 3\n      expect(y[0,0]).to eq(4)\n    end\n\n    it \"should perform element-wise addition\" do\n      r = NMatrix.new(2, stype: :list, dtype: :int64, default: 0)\n      r[0,0] = 52\n      r[1,1] = -8\n      q = @n + @m\n      expect(q).to eq(r)\n    end\n\n    it \"should perform element-wise subtraction\" do\n      r = NMatrix.new(:dense, 2, [52, 0, 0, 88], :int64).cast(:list, :int64)\n      expect(@n-@m).to eq(r)\n    end\n\n    it \"should perform element-wise multiplication\" do\n      r = NMatrix.new(:dense, 2, [52, 0, 0, -1920], :int64).cast(:list, :int64)\n      m = NMatrix.new(:list, 2, 1, :int64)\n      m[1,1] = -48\n      expect(@n*m).to eq(r)\n    end\n\n    it \"should perform element-wise division\" do\n      m = NMatrix.new(:list, 2, 1, :int64)\n      m[1,1] = 2\n      r = NMatrix.new(:dense, 2, [52, 0, 0, 20], :int64).cast(:list, :int64)\n      expect(@n/m).to eq(r)\n    end\n\n    it \"should perform element-wise modulo\" do\n      pending(\"not yet implemented for sparse matrices for NMatrix-JRuby\") 
if jruby?\n      m = NMatrix.new(:list, 2, 1, :int64)\n      m[0,0] = 50\n      m[1,1] = 40\n      (@n % m)\n    end\n\n    it \"should handle element-wise equality (=~)\" do\n      r = NMatrix.new(:list, 2, false, :object)\n      r[0,1] = true\n      r[1,0] = true\n\n      expect(@n =~ @m).to eq(r)\n    end\n\n    it \"should handle element-wise inequality (!~)\" do\n      r = NMatrix.new(:list, 2, false, :object)\n      r[0,0] = true\n      r[1,1] = true\n\n      expect(@n !~ @m).to eq(r)\n    end\n\n    it \"should handle element-wise less-than (<)\" do\n      expect(@n < @m).to eq(NMatrix.new(:list, 2, false, :object))\n    end\n\n    it \"should handle element-wise greater-than (>)\" do\n      r = NMatrix.new(:list, 2, false, :object)\n      r[0,0] = true\n      r[1,1] = true\n      expect(@n > @m).to eq(r)\n    end\n\n    it \"should handle element-wise greater-than-or-equals (>=)\" do\n      expect(@n >= @m).to eq(NMatrix.new(:list, 2, true, :object))\n    end\n\n    it \"should handle element-wise less-than-or-equals (<=)\" do\n      r = NMatrix.new(:list, 2, false, :object)\n      r[0,1] = true\n      r[1,0] = true\n      expect(@n <= @m).to eq(r)\n    end\n  end\n\n  context \"dense\" do\n    context \"scalar arithmetic\" do\n      before :each do\n        @n = NMatrix.new(:dense, 2, [1,2,3,4], :int64)\n      end\n\n      it \"works for integers\" do\n        expect(@n+1).to eq(NMatrix.new(:dense, 2, [2,3,4,5], :int64))\n      end\n\n      #it \"works for complex64\" do\n      #  n = @n.cast(:dtype => :complex64)\n      #  (n + 10.0).to_a.should == [Complex(11.0), Complex(12.0), Complex(13.0), Complex(14.0)]\n      #end\n    end\n\n    context \"elementwise arithmetic\" do\n      before :each do\n        @n = NMatrix.new(:dense, 2, [1,2,3,4], :int64)\n        @m = NMatrix.new(:dense, 2, [-4,-1,0,66], :int64)\n      end\n\n      it \"adds\" do\n        r = @n+@m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [-3, 1, 3, 70], :int64))\n      end\n\n     
 it \"subtracts\" do\n        r = @n-@m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [5, 3, 3, -62], :int64))\n      end\n\n      it \"multiplies\" do\n        r = @n*@m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [-4, -2, 0, 264], :int64))\n      end\n\n      it \"divides in the Ruby way\" do\n        pending(\"not yet implemented int dtype for NMatrix-JRuby\") if jruby?\n        m = @m.clone\n        m[1,0] = 3\n        r = @n/m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [-1, -2, 1, 0], :int64))\n      end\n\n      it \"exponentiates\" do\n        r = @n ** 2\n        # TODO: We might have problems with the dtype.\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [1, 4, 9, 16], :int64))\n      end\n\n      it \"modulo\" do\n        pending(\"not yet implemented int dtype for NMatrix-JRuby\") if jruby?\n        expect(@n % (@m + 2)).to eq(NMatrix.new(:dense, [2,2], [-1, 0, 1, 4], :int64))\n      end\n    end\n\n    context \"elementwise comparisons\" do\n      before :each do\n        @n = NMatrix.new(:dense, 2, [1,2,3,4], :int64)\n        @m = NMatrix.new(:dense, 2, [-4,-1,3,2], :int64)\n      end\n\n      it \"equals\" do\n        r = @n =~ @m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [false, false, true, false], :object))\n      end\n\n      it \"is not equal\" do\n        r = @n !~ @m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [true, true, false, true], :object))\n      end\n\n      it \"is less than\" do\n        r = @n < @m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], false, :object))\n      end\n\n      it \"is greater than\" do\n        r = @n > @m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [true, true, false, true], :object))\n      end\n\n      it \"is less than or equal to\" do\n        r = @n <= @m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [false, false, true, false], :object))\n      end\n\n      it \"is greater than or equal to\" do\n        n = NMatrix.new(:dense, [2,2], [1, 2, 
2, 4], :int64)\n        r = n >= @m\n        expect(r).to eq(NMatrix.new(:dense, [2,2], [true, true, false, true], :object))\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/homogeneous_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == homogeneous_spec.rb\n#\n# Specs for the homogeneous transformation matrix methods.\n#\n\nrequire 'spec_helper'\nrequire \"./lib/nmatrix/homogeneous.rb\"\n\nrequire 'pry'\n\ndescribe 'NMatrix' do\n  context \".x_rotation\" do\n    it \"should generate a matrix representing a rotation about the x axis\" do\n      x = NMatrix.x_rotation(Math::PI/6)\n      expect(x).to be_within(1e-8).of(NMatrix.new([4,4], [1.0, 0.0, 0.0, 0.0,\n                                                      0.0, Math.cos(Math::PI/6), -0.5, 0.0,\n                                                      0.0, 0.5, Math.cos(Math::PI/6), 0.0,\n                                                      0.0, 0.0, 0.0, 1.0] ))\n    end\n  end\n\n\n  context \".y_rotation\" do\n    it \"should generate a matrix representing a rotation about the y axis\" do\n      y = NMatrix.y_rotation(Math::PI/6)\n      expect(y).to be_within(1e-8).of(NMatrix.new([4,4], [Math.cos(Math::PI/6), 0.0, 0.5, 0.0,\n                                                      0.0, 1.0, 0.0, 0.0,\n                                                     -0.5, 0.0, Math.cos(Math::PI/6), 0.0,\n                                                      0.0, 0.0, 0.0, 1.0] ))\n    end\n  end\n\n  context \".z_rotation\" do\n    it \"should generate a matrix representing a 
rotation about the z axis\" do\n      z = NMatrix.z_rotation(Math::PI/6)\n      expect(z).to be_within(1e-8).of(NMatrix.new([4,4], [Math.cos(Math::PI/6), -0.5, 0.0, 0.0,\n                                                      0.5, Math.cos(Math::PI/6), 0.0, 0.0,\n                                                      0.0, 0.0, 1.0, 0.0,\n                                                      0.0, 0.0, 0.0, 1.0] ))\n    end\n  end\n\n  context \".translation\" do\n    it \"should generate a translation matrix from an Array\" do\n      t = NMatrix.translation([4,5,6])\n      expect(t).to be_within(1e-8).of(NMatrix.new([4,4], [1, 0, 0, 4,\n                                                      0, 1, 0, 5,\n                                                      0, 0, 1, 6,\n                                                      0, 0, 0, 1] ))\n    end\n\n    it \"should generate a translation matrix from x, y, and z values\" do\n      t = NMatrix.translation(4,5,6)\n      expect(t).to be_within(1e-8).of(NMatrix.new([4,4], [1, 0, 0, 4,\n                                                      0, 1, 0, 5,\n                                                      0, 0, 1, 6,\n                                                      0, 0, 0, 1] ))\n    end\n\n    it \"should generate a translation matrix from an NMatrix with correctly inferred dtype\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      t = NMatrix.translation(NMatrix.new([3,1], [4,5,6], dtype: :float64) )\n      expect(t).to be_within(1e-8).of(NMatrix.new([4,4], [1, 0, 0, 4,\n                                                      0, 1, 0, 5,\n                                                      0, 0, 1, 6,\n                                                      0, 0, 0, 1] ))\n      expect(t.dtype).to be(:float64)\n    end\n  end\n\n  context \"#quaternion\" do\n    it \"should generate a singularity-free quaternion\" do\n      transform = NMatrix.new([4,4], 
[-0.9995825,-0.02527934,-0.0139845,50.61761,-0.02732551,0.9844284,0.1736463,-22.95566,0.009376526,0.1739562,-0.9847089,7.1521,0,0,0,1])\n      q = transform.quaternion\n      expect(Math.sqrt(q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)).to be_within(1e-6).of(1.0)\n    end\n  end\nend\n"
  },
  {
    "path": "spec/io/fortran_format_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == fortran_format_spec.rb\n#\n# Basic tests for NMatrix::IO::FortranFormat.\n#\n\nrequire './lib/nmatrix'\n\ndescribe NMatrix::IO::FortranFormat do\n  it \"parses integer FORTRAN formats\" do\n    int_fmt =  NMatrix::IO::FortranFormat::Reader.new('(16I5)').parse\n\n    expect(int_fmt[:format_code]).to eq \"INT_ID\"\n    expect(int_fmt[:repeat])     .to eq 16\n    expect(int_fmt[:field_width]).to eq 5    \n\n    int_fmt = NMatrix::IO::FortranFormat::Reader.new('(I4)').parse \n\n    expect(int_fmt[:format_code]).to eq \"INT_ID\"\n    expect(int_fmt[:field_width]).to eq 4\n  end\n\n  it \"parses floating point FORTRAN formats\" do\n    fp_fmt = NMatrix::IO::FortranFormat::Reader.new('(10F7.1)').parse\n\n    expect(fp_fmt[:format_code])       .to eq \"FP_ID\"\n    expect(fp_fmt[:repeat])            .to eq 10\n    expect(fp_fmt[:field_width])       .to eq 7\n    expect(fp_fmt[:post_decimal_width]).to eq 1\n\n    fp_fmt = NMatrix::IO::FortranFormat::Reader.new('(F4.2)').parse\n\n    expect(fp_fmt[:format_code])       .to eq \"FP_ID\"\n    expect(fp_fmt[:field_width])       .to eq 4\n    expect(fp_fmt[:post_decimal_width]).to eq 2\n  end\n\n  it \"parses exponential FORTRAN formats\" do\n    exp_fmt = NMatrix::IO::FortranFormat::Reader.new('(2E8.3E3)').parse\n\n    expect(exp_fmt[:format_code])    
   .to eq \"EXP_ID\"\n    expect(exp_fmt[:repeat])            .to eq 2\n    expect(exp_fmt[:field_width])       .to eq 8\n    expect(exp_fmt[:post_decimal_width]).to eq 3\n    expect(exp_fmt[:exponent_width])    .to eq 3\n\n    exp_fmt = NMatrix::IO::FortranFormat::Reader.new('(3E3.6)').parse\n\n    expect(exp_fmt[:format_code])       .to eq \"EXP_ID\"\n    expect(exp_fmt[:repeat])            .to eq 3\n    expect(exp_fmt[:field_width])       .to eq 3\n    expect(exp_fmt[:post_decimal_width]).to eq 6\n\n    exp_fmt = NMatrix::IO::FortranFormat::Reader.new('(E4.5)').parse\n    expect(exp_fmt[:format_code])       .to eq \"EXP_ID\"\n    expect(exp_fmt[:field_width])       .to eq 4\n    expect(exp_fmt[:post_decimal_width]).to eq 5\n  end\n\n  ['I3', '(F4)', '(E3.', '(E4.E5)'].each do |bad_format|\n    it \"doesn't let bad input through : #{bad_format}\" do\n      expect {\n        NMatrix::IO::FortranFormat::Reader.new(bad_format).parse\n      }.to raise_error(IOError)\n  end\nend\nend\n"
  },
  {
    "path": "spec/io/harwell_boeing_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io_spec.rb\n#\n# Basic tests for NMatrix::IO::HarwelBoeing.\n\n# TODO : After the fortran format thing is done\nrequire 'spec_helper'\nrequire \"./lib/nmatrix\"\n\ndescribe NMatrix::IO::HarwellBoeing do\n  def check_file_header header\n    expect(header[:title])    .to eq(\"Title\")\n    expect(header[:key])      .to eq(\"Key\")\n\n    expect(header[:totcrd])   .to eq(5)\n    expect(header[:ptrcrd])   .to eq(1)\n    expect(header[:indcrd])   .to eq(1)\n    expect(header[:valcrd])   .to eq(3)\n    expect(header[:rhscrd])   .to eq(0)\n    \n    expect(header[:mxtype])   .to eq('RUA')\n    expect(header[:nrow])     .to eq(5)\n    expect(header[:ncol])     .to eq(5)\n    expect(header[:nnzero])   .to eq(13)\n    expect(header[:neltvl])   .to eq(0)\n\n    expect(header[:ptrfmt])   .to eq({\n      format_code: \"INT_ID\",\n      repeat:             6,         \n      field_width:        3\n      })\n    expect(header[:indfmt])   .to eq({\n      format_code: \"INT_ID\",\n      repeat:            13,\n      field_width:       3\n      })\n    expect(header[:valfmt])   .to eq({\n      format_code:         \"EXP_ID\",\n      repeat:                     5,\n      field_width:               15,\n      post_decimal_width:         8\n      })\n    expect(header[:rhsfmt])   .to eq({\n      format_code: 
        \"EXP_ID\",\n      repeat:                     5,\n      field_width:               15,\n      post_decimal_width:         8\n      })\n  end\n\n  it \"loads a Harwell Boeing file values and header (currently real only)\" do\n    n, h = NMatrix::IO::HarwellBoeing.load(\"spec/io/test.rua\")\n\n    expect(n.is_a? NMatrix).to eq(true)\n    expect(n.cols)         .to eq(5)\n    expect(n.rows)         .to eq(5)\n\n    expect(n[0,0])         .to eq(11)\n    expect(n[4,4])         .to eq(55)\n\n    expect(h.is_a? Hash).to eq(true) \n    check_file_header(h)\n  end\n\n  it \"loads only the header of the file when specified\" do\n    h = NMatrix::IO::HarwellBoeing.load(\"spec/io/test.rua\", header: true)\n\n    expect(h.is_a? Hash).to eq(true)\n    check_file_header(h)\n  end\n\n  it \"raises error for wrong Harwell Boeing file name\" do\n    expect{\n      NMatrix::IO::HarwellBoeing.load(\"spec/io/wrong.afx\")\n    }.to raise_error(IOError)\n  end\nend"
  },
  {
    "path": "spec/io/test.rua",
    "content": "Title                                                                   Key     \n             5             1             1             3             0\nRUA                        5             5            13             0\n(6I3)           (13I3)          (5E15.8)            (5E15.8)            \n  1  4  7  8 11 14\n  1  3  5  2  3  5  3  1  3  4  3  4  5\n11.0           31.0           51.0           22.0           32.0\n52.0           33.0           14.0           34.0           44.0\n35.0           45.0           55.0\n"
  },
  {
    "path": "spec/io_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == io_spec.rb\n#\n# Basic tests for NMatrix::IO.\n#\nrequire \"tmpdir\" # Used to avoid cluttering the repository.\nrequire 'spec_helper'\nrequire \"./lib/nmatrix\"\n\ndescribe NMatrix::IO do\n  let(:tmp_dir)  { Dir.mktmpdir }\n  let(:test_out) { File.join(tmp_dir, 'test-out') }\n\n  it \"repacks a string\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    expect(NMatrix::IO::Matlab.repack(\"hello\", :miUINT8, :byte)).to eq(\"hello\")\n  end\n\n  it \"creates yale from internal byte-string function\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    ia = NMatrix::IO::Matlab.repack(\"\\0\\1\\3\\3\\4\", :miUINT8, :itype)\n    ja = NMatrix::IO::Matlab.repack(\"\\0\\1\\3\\0\\0\\0\\0\\0\\0\\0\\0\", :miUINT8, :itype)\n    n = NMatrix.new(:yale, [4,4], :byte, ia, ja, \"\\2\\3\\5\\4\", :byte)\n    expect(n[0,0]).to eq(2)\n    expect(n[1,1]).to eq(3)\n    expect(n[1,3]).to eq(5)\n    expect(n[3,0]).to eq(4)\n    expect(n[2,2]).to eq(0)\n    expect(n[3,3]).to eq(0)\n  end\n\n  it \"reads MATLAB .mat file containing a single square sparse matrix\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix::IO::Matlab.load_mat(\"spec/4x4_sparse.mat\")\n    expect(n[0,0]).to eq(2)\n    expect(n[1,1]).to eq(3)\n    
expect(n[1,3]).to eq(5)\n    expect(n[3,0]).to eq(4)\n    expect(n[2,2]).to eq(0)\n    expect(n[3,3]).to eq(0)\n  end\n\n  it \"reads MATLAB .mat file containing a single dense integer matrix\" do\n    n = NMatrix::IO::Matlab.load_mat(\"spec/4x5_dense.mat\")\n    m = NMatrix.new([4,5], [16,17,18,19,20,15,14,13,12,11,6,7,8,9,10,5,4,3,2,1])\n    expect(n).to eq(m)\n  end\n\n  it \"reads MATLAB .mat file containing a single dense double matrix\" do\n    n = NMatrix::IO::Matlab.load_mat(\"spec/2x2_dense_double.mat\")\n    m = NMatrix.new(2, [1.1, 2.0, 3.0, 4.0], dtype: :float64)\n    expect(n).to eq(m)\n  end\n\n  it \"loads and saves MatrixMarket .mtx file containing a single large sparse double matrix\" do\n    pending \"spec disabled because it's so slow\"\n    n = NMatrix::IO::Market.load(\"spec/utm5940.mtx\")\n    NMatrix::IO::Market.save(n, \"spec/utm5940.saved.mtx\")\n    expect(`wc -l spec/utm5940.mtx`.split[0]).to eq(`wc -l spec/utm5940.saved.mtx`.split[0])\n  end\n\n  it \"loads a Point Cloud Library PCD file\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix::IO::PointCloud.load(\"spec/test.pcd\")\n    expect(n.column(0).sort.uniq.size).to eq(1)\n    expect(n.column(0).sort.uniq.first).to eq(207.008)\n    expect(n[0,3]).to eq(0)\n  end\n\n  it \"raises an error when reading a non-existent file\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    fn = rand(10000000).to_i.to_s\n    while File.exist?(fn)\n      fn = rand(10000000).to_i.to_s\n    end\n    expect{ NMatrix.read(fn) }.to raise_error(Errno::ENOENT)\n  end\n\n  it \"reads and writes NMatrix dense\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix.new(:dense, [4,3], [0,1,2,3,4,5,6,7,8,9,10,11], :int32)\n    n.write(test_out)\n\n    m = NMatrix.read(test_out)\n    expect(n).to eq(m)\n  end\n\n  it \"reads and writes NMatrix dense as symmetric\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if 
jruby?\n    n = NMatrix.new(:dense, 3, [0,1,2,1,3,4,2,4,5], :int16)\n    n.write(test_out, :symmetric)\n\n    m = NMatrix.read(test_out)\n    expect(n).to eq(m)\n  end\n\n  it \"reads and writes NMatrix dense as skew\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix.new(:dense, 3, [0,1,2,-1,3,4,-2,-4,5], :float64)\n    n.write(test_out, :skew)\n\n    m = NMatrix.read(test_out)\n    expect(n).to eq(m)\n  end\n\n  it \"reads and writes NMatrix dense as hermitian\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix.new(:dense, 3, [0,1,2,1,3,4,2,4,5], :complex64)\n    n.write(test_out, :hermitian)\n\n    m = NMatrix.read(test_out)\n    expect(n).to eq(m)\n  end\n\n  it \"reads and writes NMatrix dense as upper\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix.new(:dense, 3, [-1,1,2,3,4,5,6,7,8], :int32)\n    n.write(test_out, :upper)\n\n    m = NMatrix.new(:dense, 3, [-1,1,2,0,4,5,0,0,8], :int32) # lower version of the same\n\n    o = NMatrix.read(test_out)\n    expect(o).to eq(m)\n    expect(o).not_to eq(n)\n  end\n\n  it \"reads and writes NMatrix dense as lower\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    n = NMatrix.new(:dense, 3, [-1,1,2,3,4,5,6,7,8], :int32)\n    n.write(test_out, :lower)\n\n    m = NMatrix.new(:dense, 3, [-1,0,0,3,4,0,6,7,8], :int32) # lower version of the same\n\n    o = NMatrix.read(test_out)\n    expect(o).to eq(m)\n    expect(o).not_to eq(n)\n  end\nend\n"
  },
  {
    "path": "spec/lapack_core_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == lapack_core_spec.rb\n#\n# Tests for LAPACK functions that have internal implementations (i.e. they\n# don't rely on external libraries) and also functions that are implemented\n# by both nmatrix-atlas and nmatrix-lapacke. These tests will also be run for the\n# plugins that do use external libraries, since they will override the\n# internal implmentations.\n#\n\nrequire 'spec_helper'\n\ndescribe \"NMatrix::LAPACK functions with internal implementations\" do\n  # where integer math is allowed\n  [:byte, :int8, :int16, :int32, :int64, :float32, :float64, :complex64, :complex128].each do |dtype|\n    context dtype do\n      # This spec seems a little weird. It looks like laswp ignores the last\n      # element of piv, though maybe I misunderstand smth. 
It would make\n      # more sense if piv were [2,1,3,3]\n      it \"exposes clapack laswp\" do\n        a = NMatrix.new(:dense, [3,4], [1,2,3,4,5,6,7,8,9,10,11,12], dtype)\n        NMatrix::LAPACK::clapack_laswp(3, a, 4, 0, 3, [2,1,3,0], 1)\n        b = NMatrix.new(:dense, [3,4], [3,2,4,1,7,6,8,5,11,10,12,9], dtype)\n        expect(a).to eq(b)\n      end\n\n      # This spec is OK, because the default behavior for permute_columns\n      # is :intuitive, which is different from :lapack (default laswp behavior)\n      it \"exposes NMatrix#permute_columns and #permute_columns! (user-friendly laswp)\" do\n        a = NMatrix.new(:dense, [3,4], [1,2,3,4,5,6,7,8,9,10,11,12], dtype)\n        b = NMatrix.new(:dense, [3,4], [3,2,4,1,7,6,8,5,11,10,12,9], dtype)\n        piv = [2,1,3,0]\n        r = a.permute_columns(piv)\n        expect(r).not_to eq(a)\n        expect(r).to eq(b)\n        a.permute_columns!(piv)\n        expect(a).to eq(b)\n      end\n    end\n  end\n\n  # where integer math is not allowed\n  [:float32, :float64, :complex64, :complex128].each do |dtype|\n    context dtype do\n\n      # clapack_getrf performs a LU decomposition, but unlike the\n      # standard LAPACK getrf, it's the upper matrix that has unit diagonals\n      # and the permutation is done in columns not rows. 
See the code for\n      # details.\n      # Also the rows in the pivot vector are indexed starting from 0,\n      # rather than 1 as in LAPACK\n      it \"calculates LU decomposition using clapack_getrf (row-major, square)\" do\n        a = NMatrix.new(3, [4,9,2,3,5,7,8,1,6], dtype: dtype)\n        ipiv = NMatrix::LAPACK::clapack_getrf(:row, a.shape[0], a.shape[1], a, a.shape[1])\n        b = NMatrix.new(3,[9, 2.0/9, 4.0/9,\n                           5, 53.0/9, 7.0/53,\n                           1, 52.0/9, 360.0/53], dtype: dtype)\n        ipiv_true = [1,2,2]\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(a).to be_within(err).of(b)\n        expect(ipiv).to eq(ipiv_true)\n      end\n\n      it \"calculates LU decomposition using clapack_getrf (row-major, rectangular)\" do\n        a = NMatrix.new([3,4], GETRF_EXAMPLE_ARRAY, dtype: dtype)\n        ipiv = NMatrix::LAPACK::clapack_getrf(:row, a.shape[0], a.shape[1], a, a.shape[1])\n        #we can't use GETRF_SOLUTION_ARRAY here, because of the different\n        #conventions of clapack_getrf\n        b = NMatrix.new([3,4],[10.0, -0.1,      0.0,       0.4,\n                               3.0,   9.3,  20.0/93,   38.0/93,\n                               1.0,   7.1, 602.0/93, 251.0/602], dtype: dtype)\n        ipiv_true = [2,2,2]\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(a).to be_within(err).of(b)\n        expect(ipiv).to eq(ipiv_true)\n      end\n\n      #Normally we wouldn't check column-major routines, since all our matrices\n      #are row-major, but we use the column-major version in #getrf!, so we\n      #want to test 
it here.\n      it \"calculates LU decomposition using clapack_getrf (col-major, rectangular)\" do\n        #this is supposed to represent the 3x2 matrix\n        # -1  2\n        #  0  3\n        #  1 -2\n        a = NMatrix.new([1,6], [-1,0,1,2,3,-2], dtype: dtype)\n        ipiv = NMatrix::LAPACK::clapack_getrf(:col, 3, 2, a, 3)\n        b = NMatrix.new([1,6], [-1,0,-1,2,3,0], dtype: dtype)\n        ipiv_true = [0,1]\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(a).to be_within(err).of(b)\n        expect(ipiv).to eq(ipiv_true)\n      end\n\n      it \"calculates LU decomposition using #getrf! (rectangular)\" do\n        a = NMatrix.new([3,4], GETRF_EXAMPLE_ARRAY, dtype: dtype)\n        ipiv = a.getrf!\n        b = NMatrix.new([3,4], GETRF_SOLUTION_ARRAY, dtype: dtype)\n        ipiv_true = [2,3,3]\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(a).to be_within(err).of(b)\n        expect(ipiv).to eq(ipiv_true)\n      end\n\n      it \"calculates LU decomposition using #getrf! 
(square)\" do\n        a = NMatrix.new([4,4], [0,1,2,3, 1,1,1,1, 0,-1,-2,0, 0,2,0,2], dtype: dtype)\n        ipiv = a.getrf!\n\n        b = NMatrix.new([4,4], [1,1,1,1, 0,2,0,2, 0,-0.5,-2,1, 0,0.5,-1,3], dtype: dtype)\n        ipiv_true = [2,4,3,4]\n\n        expect(a).to eq(b)\n        expect(ipiv).to eq(ipiv_true)\n      end\n\n      # Together, these calls are basically xGESV from LAPACK: http://www.netlib.org/lapack/double/dgesv.f\n      it \"exposes clapack_getrs\" do\n        a     = NMatrix.new(3, [-2,4,-3, 3,-2,1, 0,-4,3], dtype: dtype)\n        ipiv  = NMatrix::LAPACK::clapack_getrf(:row, 3, 3, a, 3)\n        b     = NMatrix.new([3,1], [-1, 17, -9], dtype: dtype)\n\n        NMatrix::LAPACK::clapack_getrs(:row, false, 3, 1, a, 3, ipiv, b, 3)\n\n        expect(b[0]).to eq(5)\n        expect(b[1]).to eq(-15.0/2)\n        expect(b[2]).to eq(-13)\n      end\n\n      it \"solves matrix equation (non-vector rhs) using clapack_getrs\" do\n        a     = NMatrix.new(3, [-2,4,-3, 3,-2,1, 0,-4,3], dtype: dtype)\n        b     = NMatrix.new([3,2], [-1,2, 17,1, -9,-4], dtype: dtype)\n\n        n = a.shape[0]\n        nrhs = b.shape[1]\n\n        ipiv  = NMatrix::LAPACK::clapack_getrf(:row, n, n, a, n)\n        # Even though we pass :row to clapack_getrs, it still interprets b as\n        # column-major, so need to transpose b before and after:\n        b = b.transpose\n        NMatrix::LAPACK::clapack_getrs(:row, false, n, nrhs, a, n, ipiv, b, n)\n        b = b.transpose\n\n        b_true = NMatrix.new([3,2], [5,1, -7.5,1, -13,0], dtype: dtype)\n        expect(b).to eq(b_true)\n      end\n\n      #posv is like potrf+potrs\n      #posv is implemented in both nmatrix-atlas and nmatrix-lapacke, so the spec\n      #needs to be shared here\n      it \"solves a (symmetric positive-definite) matrix equation using posv (vector rhs)\" do\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        b = 
NMatrix.new([3,1], [4,2,0], dtype: dtype)\n\n        begin\n          x = NMatrix::LAPACK::posv(:upper, a, b)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        x_true = NMatrix.new([3,1], [1, 1, 0], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(x).to be_within(err).of(x_true)\n      end\n\n      it \"solves a (symmetric positive-definite) matrix equation using posv (non-vector rhs)\" do\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        b = NMatrix.new([3,2], [4,-1, 2,-1, 0,0], dtype: dtype)\n\n        begin\n          x = NMatrix::LAPACK::posv(:upper, a, b)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        x_true = NMatrix.new([3,2], [1,0, 1,-1, 0,1], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(x).to be_within(err).of(x_true)\n      end\n\n      it \"calculates the singular value decomposition with NMatrix#gesvd\" do\n        #example from Wikipedia\n        m = 4\n        n = 5\n        mn_min = [m,n].min\n        a = NMatrix.new([m,n],[1,0,0,0,2, 0,0,3,0,0, 0,0,0,0,0, 0,4,0,0,0], dtype: dtype)\n\n        begin\n          u, s, vt = a.gesvd\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        s_true = NMatrix.new([mn_min,1], [4,3,Math.sqrt(5),0], dtype: a.abs_dtype)\n        u_true = NMatrix.new([m,m], [0,0,1,0, 0,1,0,0, 0,0,0,-1, 1,0,0,0], dtype: dtype)\n        vt_true = NMatrix.new([n,n], [0,1,0,0,0, 0,0,1,0,0, Math.sqrt(0.2),0,0,0,Math.sqrt(0.8), 0,0,0,1,0, -Math.sqrt(0.8),0,0,0,Math.sqrt(0.2)], dtype: dtype)\n\n        
err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(s).to be_within(err).of(s_true)\n        expect(u).to be_within(err).of(u_true)\n        expect(vt).to be_within(err).of(vt_true)\n\n        expect(s.dtype).to eq(a.abs_dtype)\n        expect(u.dtype).to eq(dtype)\n        expect(vt.dtype).to eq(dtype)\n      end\n\n      it \"calculates the singular value decomposition with NMatrix#gesdd\" do\n        #example from Wikipedia\n        m = 4\n        n = 5\n        mn_min = [m,n].min\n        a = NMatrix.new([m,n],[1,0,0,0,2, 0,0,3,0,0, 0,0,0,0,0, 0,4,0,0,0], dtype: dtype)\n\n        begin\n          u, s, vt = a.gesdd\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        s_true = NMatrix.new([mn_min,1], [4,3,Math.sqrt(5),0], dtype: a.abs_dtype)\n        u_true = NMatrix.new([m,m], [0,0,1,0, 0,1,0,0, 0,0,0,-1, 1,0,0,0], dtype: dtype)\n        vt_true = NMatrix.new([n,n], [0,1,0,0,0, 0,0,1,0,0, Math.sqrt(0.2),0,0,0,Math.sqrt(0.8), 0,0,0,1,0, -Math.sqrt(0.8),0,0,0,Math.sqrt(0.2)], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(s).to be_within(err).of(s_true)\n        expect(u).to be_within(err).of(u_true)\n        expect(vt).to be_within(err).of(vt_true)\n      end\n\n\n      it \"calculates eigenvalues and eigenvectors NMatrix::LAPACK.geev (real matrix, complex eigenvalues)\" do\n        n = 3\n        a = NMatrix.new([n,n], [-1,0,0, 0,1,-2, 0,1,-1], dtype: dtype)\n\n        begin\n          eigenvalues, vl, vr = NMatrix::LAPACK.geev(a)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        eigenvalues_true = NMatrix.new([n,1], [Complex(0,1), -Complex(0,1), -1], dtype: NMatrix.upcast(dtype, 
:complex64))\n        vr_true = NMatrix.new([n,n],[0,0,1,\n                                     2/Math.sqrt(6),2/Math.sqrt(6),0,\n                                     Complex(1,-1)/Math.sqrt(6),Complex(1,1)/Math.sqrt(6),0], dtype: NMatrix.upcast(dtype, :complex64))\n        vl_true = NMatrix.new([n,n],[0,0,1,\n                                     Complex(-1,1)/Math.sqrt(6),Complex(-1,-1)/Math.sqrt(6),0,\n                                     2/Math.sqrt(6),2/Math.sqrt(6),0], dtype: NMatrix.upcast(dtype, :complex64))\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(eigenvalues).to be_within(err).of(eigenvalues_true)\n        expect(vr).to be_within(err).of(vr_true)\n        expect(vl).to be_within(err).of(vl_true)\n\n        expect(eigenvalues.dtype).to eq(NMatrix.upcast(dtype, :complex64))\n        expect(vr.dtype).to eq(NMatrix.upcast(dtype, :complex64))\n        expect(vl.dtype).to eq(NMatrix.upcast(dtype, :complex64))\n      end\n\n      it \"calculates eigenvalues and eigenvectors NMatrix::LAPACK.geev (real matrix, real eigenvalues)\" do\n        n = 3\n        a = NMatrix.new([n,n], [2,0,0, 0,3,2, 0,1,2], dtype: dtype)\n\n        begin\n          eigenvalues, vl, vr = NMatrix::LAPACK.geev(a)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        eigenvalues_true = NMatrix.new([n,1], [1, 4, 2], dtype: dtype)\n\n        # For some reason, some of the eigenvectors have different signs\n        # when we use the complex versions of geev. 
This is totally fine, since\n        # they are still normalized eigenvectors even with the sign flipped.\n        if a.complex_dtype?\n          vr_true = NMatrix.new([n,n],[0,0,1,\n                                       1/Math.sqrt(2),2/Math.sqrt(5),0,\n                                       -1/Math.sqrt(2),1/Math.sqrt(5),0], dtype: dtype)\n          vl_true = NMatrix.new([n,n],[0,0,1,\n                                       -1/Math.sqrt(5),1/Math.sqrt(2),0,\n                                       2/Math.sqrt(5),1/Math.sqrt(2),0], dtype: dtype)\n        else\n          vr_true = NMatrix.new([n,n],[0,0,1,\n                                       1/Math.sqrt(2),-2/Math.sqrt(5),0,\n                                       -1/Math.sqrt(2),-1/Math.sqrt(5),0], dtype: dtype)\n          vl_true = NMatrix.new([n,n],[0,0,1,\n                                       1/Math.sqrt(5),-1/Math.sqrt(2),0,\n                                       -2/Math.sqrt(5),-1/Math.sqrt(2),0], dtype: dtype)\n        end\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(eigenvalues).to be_within(err).of(eigenvalues_true)\n        expect(vr).to be_within(err).of(vr_true)\n        expect(vl).to be_within(err).of(vl_true)\n\n        expect(eigenvalues.dtype).to eq(dtype)\n        expect(vr.dtype).to eq(dtype)\n        expect(vl.dtype).to eq(dtype)\n      end\n\n      it \"calculates eigenvalues and eigenvectors NMatrix::LAPACK.geev (left eigenvectors only)\" do\n        n = 3\n        a = NMatrix.new([n,n], [-1,0,0, 0,1,-2, 0,1,-1], dtype: dtype)\n\n        begin\n          eigenvalues, vl = NMatrix::LAPACK.geev(a, :left)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        eigenvalues_true = NMatrix.new([n,1], [Complex(0,1), -Complex(0,1), -1], dtype: NMatrix.upcast(dtype, :complex64))\n        vl_true = 
NMatrix.new([n,n],[0,0,1,\n                                     Complex(-1,1)/Math.sqrt(6),Complex(-1,-1)/Math.sqrt(6),0,\n                                     2/Math.sqrt(6),2/Math.sqrt(6),0], dtype: NMatrix.upcast(dtype, :complex64))\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(eigenvalues).to be_within(err).of(eigenvalues_true)\n        expect(vl).to be_within(err).of(vl_true)\n      end\n\n      it \"calculates eigenvalues and eigenvectors NMatrix::LAPACK.geev (right eigenvectors only)\" do\n        n = 3\n        a = NMatrix.new([n,n], [-1,0,0, 0,1,-2, 0,1,-1], dtype: dtype)\n\n        begin\n          eigenvalues, vr = NMatrix::LAPACK.geev(a, :right)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n\n        eigenvalues_true = NMatrix.new([n,1], [Complex(0,1), -Complex(0,1), -1], dtype: NMatrix.upcast(dtype, :complex64))\n        vr_true = NMatrix.new([n,n],[0,0,1,\n                                     2/Math.sqrt(6),2/Math.sqrt(6),0,\n                                     Complex(1,-1)/Math.sqrt(6),Complex(1,1)/Math.sqrt(6),0], dtype: NMatrix.upcast(dtype, :complex64))\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(eigenvalues).to be_within(err).of(eigenvalues_true)\n        expect(vr).to be_within(err).of(vr_true)\n      end\n    end\n  end\n\n  [:complex64, :complex128].each do |dtype|\n    context dtype do\n      it \"calculates eigenvalues and eigenvectors NMatrix::LAPACK.geev (complex matrix)\" do\n        n = 3\n        a = NMatrix.new([n,n], [Complex(0,1),0,0, 0,3,2, 0,1,2], dtype: dtype)\n\n        begin\n          eigenvalues, vl, vr = NMatrix::LAPACK.geev(a)\n        rescue NotImplementedError => e\n          
pending e.to_s\n        end\n\n        eigenvalues_true = NMatrix.new([n,1], [1, 4, Complex(0,1)], dtype: dtype)\n        vr_true = NMatrix.new([n,n],[0,0,1,\n                                     1/Math.sqrt(2),2/Math.sqrt(5),0,\n                                     -1/Math.sqrt(2),1/Math.sqrt(5),0], dtype: dtype)\n        vl_true = NMatrix.new([n,n],[0,0,1,\n                                     -1/Math.sqrt(5),1/Math.sqrt(2),0,\n                                     2/Math.sqrt(5),1/Math.sqrt(2),0], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(eigenvalues).to be_within(err).of(eigenvalues_true)\n        expect(vr).to be_within(err).of(vr_true)\n        expect(vl).to be_within(err).of(vl_true)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/leakcheck.rb",
    "content": "require \"./lib/nmatrix\"\n\n# Fixed:\n#n = NMatrix.new(:yale, [8,2], :int64)\n#m = NMatrix.new(:yale, [2,8], :int64)\n#100.times do\n#  n.dot(m)\n#end\n#GC.start\n\n# Remaining:\n100.times do |t|\n  n = NMatrix.new(:dense, 1000, :float64)\n  n[0,t] = 1.0\n  puts n[t,0]\nend\n"
  },
  {
    "path": "spec/math_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == math_spec.rb\n#\n# Tests for non-BLAS and non-LAPACK math functions, or for simplified\n# versions of unfriendly BLAS and LAPACK functions.\n#\n\nrequire 'spec_helper'\n\ndescribe \"math\" do\n  context \"elementwise math functions\" do\n\n    [:dense,:list,:yale].each do |stype|\n      context stype do\n\n        [:int64,:float64].each do |dtype|\n          context dtype do\n            before :each do\n              @size = [2,2]\n              @m = NMatrix.seq(@size, dtype: dtype, stype: stype)+1\n              @a = @m.to_a.flatten\n            end\n\n            NMatrix::NMMath::METHODS_ARITY_1.each do |meth|\n              #skip inverse regular trig functions\n              next if meth.to_s.start_with?('a') and (not meth.to_s.end_with?('h')) \\\n                and NMatrix::NMMath::METHODS_ARITY_1.include?(\n                  meth.to_s[1...meth.to_s.length].to_sym)\n              next if meth == :atanh\n\n              if meth == :-@\n                it \"should correctly apply elementwise negation\" do\n                  expect(@m.send(meth)).to eq N.new(@size, @a.map { |e| -e }, dtype: dtype, stype: stype)\n                end\n                next\n              end\n\n              it \"should correctly apply elementwise #{meth}\" do\n\n                expect(@m.send(meth)).to 
eq N.new(@size, @a.map{ |e| Math.send(meth, e) },\n                                                 dtype: :float64, stype: stype)\n              end\n            end\n\n            NMatrix::NMMath::METHODS_ARITY_2.each do |meth|\n              next if meth == :atan2\n              it \"should correctly apply elementwise #{meth}\" do\n                expect(@m.send(meth, @m)).to eq N.new(@size, @a.map{ |e|\n                                                     Math.send(meth, e, e) },\n                                                     dtype: :float64,\n                                                     stype: stype)\n              end\n\n              it \"should correctly apply elementwise #{meth} with a scalar first arg\" do\n                expect(Math.send(meth, 1, @m)).to eq N.new(@size, @a.map { |e| Math.send(meth, 1, e) }, dtype: :float64, stype: stype)\n              end\n\n              it \"should correctly apply elementwise #{meth} with a scalar second arg\" do\n                expect(@m.send(meth, 1)).to eq N.new(@size, @a.map { |e| Math.send(meth, e, 1) }, dtype: :float64, stype: stype)\n              end\n            end\n\n            it \"should correctly apply elementwise natural log\" do\n              expect(@m.log).to eq N.new(@size, [0, Math.log(2), Math.log(3), Math.log(4)],\n                                        dtype: :float64, stype: stype)\n            end\n\n            it \"should correctly apply elementwise log with arbitrary base\" do\n              expect(@m.log(3)).to eq N.new(@size, [0, Math.log(2,3), 1, Math.log(4,3)],\n                                           dtype: :float64, stype: stype)\n            end\n\n            context \"inverse trig functions\" do\n              before :each do\n                @m = NMatrix.seq(@size, dtype: dtype, stype: stype)/4\n                @a = @m.to_a.flatten\n              end\n              [:asin, :acos, :atan, :atanh].each do |atf|\n\n                it \"should correctly apply 
elementwise #{atf}\" do\n                  expect(@m.send(atf)).to eq N.new(@size,\n                                               @a.map{ |e| Math.send(atf, e) },\n                                               dtype: :float64, stype: stype)\n                end\n              end\n\n              it \"should correctly apply elementtwise atan2\" do\n                expect(@m.atan2(@m*0+1)).to eq N.new(@size,\n                  @a.map { |e| Math.send(:atan2, e, 1) }, dtype: :float64, stype: stype)\n              end\n\n              it \"should correctly apply elementwise atan2 with a scalar first arg\" do\n                expect(Math.atan2(1, @m)).to eq N.new(@size, @a.map { |e| Math.send(:atan2, 1, e) }, dtype: :float64, stype: stype)\n              end\n\n              it \"should correctly apply elementwise atan2 with a scalar second arg\" do\n                  expect(@m.atan2(1)).to eq N.new(@size, @a.map { |e| Math.send(:atan2, e, 1) }, dtype: :float64, stype: stype)\n              end\n            end\n          end\n        end\n\n        context \"Floor and ceil for #{stype}\" do\n\n          [:floor, :ceil].each do |meth|\n            ALL_DTYPES.each do |dtype|\n              context dtype do\n                before :each do\n                  @size = [2,2]\n                  @m    = NMatrix.seq(@size, dtype: dtype, stype: stype)+1 unless jruby? and dtype == :object\n                  @a    = @m.to_a.flatten\n                end\n\n                if dtype.to_s.match(/int/) or [:byte, :object].include?(dtype)\n                  it \"should return #{dtype} for #{dtype}\" do\n                    pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n\n                    expect(@m.send(meth)).to eq N.new(@size, @a.map { |e| e.send(meth) }, dtype: dtype, stype: stype)\n\n                    if dtype == :object\n                      expect(@m.send(meth).dtype).to eq :object\n                    else\n                      expect(@m.send(meth).integer_dtype?).to eq true\n                    end\n                  end\n                elsif dtype.to_s.match(/float/)\n                  it \"should return dtype int64 for #{dtype}\" do\n\n                    expect(@m.send(meth)).to eq N.new(@size, @a.map { |e| e.send(meth) }, dtype: dtype, stype: stype)\n\n                    expect(@m.send(meth).dtype).to eq :int64\n                  end\n                elsif dtype.to_s.match(/complex/)\n                  it \"should properly calculate #{meth} for #{dtype}\" do\n                    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n\n                    expect(@m.send(meth)).to eq N.new(@size, @a.map { |e| e = Complex(e.real.send(meth), e.imag.send(meth)) }, dtype: dtype, stype: stype)\n\n                    expect(@m.send(meth).dtype).to eq :complex64  if dtype == :complex64\n                    expect(@m.send(meth).dtype).to eq :complex128 if dtype == :complex128\n                  end\n                end\n              end\n            end\n          end\n        end\n\n        context \"#round for #{stype}\" do\n          ALL_DTYPES.each do |dtype|\n            context dtype do\n              before :each do\n                @size = [2,2]\n                @mat  = NMatrix.new @size, [1.33334, 0.9998, 1.9999, -8.9999],\n                  dtype: dtype, stype: stype\n                @ans  = @mat.to_a.flatten unless jruby? and dtype == :object\n              end\n\n              it \"rounds\" do\n                pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n                expect(@mat.round).to eq(N.new(@size, @ans.map { |a| a.round},\n                  dtype: dtype, stype: stype))\n              end unless(/complex/ =~ dtype)\n\n              it \"rounds with args\" do\n                pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n                expect(@mat.round(2)).to eq(N.new(@size, @ans.map { |a| a.round(2)},\n                  dtype: dtype, stype: stype))\n              end unless(/complex/ =~ dtype)\n\n              it \"rounds complex with args\" do\n                pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n                puts @mat.round(2)\n                expect(@mat.round(2)).to be_within(0.0001).of(N.new [2,2], @ans.map {|a|\n                  Complex(a.real.round(2), a.imag.round(2))},dtype: dtype, stype: stype)\n              end if(/complex/ =~ dtype)\n\n              it \"rounds complex\" do\n                pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n                expect(@mat.round).to eq(N.new [2,2], @ans.map {|a|\n                  Complex(a.real.round, a.imag.round)},dtype: dtype, stype: stype)\n              end if(/complex/ =~ dtype)\n            end\n          end\n        end\n\n      end\n    end\n  end\n\n  NON_INTEGER_DTYPES.each do |dtype|\n    context dtype do\n      before do\n        @m = NMatrix.new([3,4], GETRF_EXAMPLE_ARRAY, dtype: dtype)\n        @err = case dtype\n                 when :float32, :complex64\n                   1e-6\n                 when :float64, :complex128\n                   1e-14\n               end\n      end\n\n      #haven't check this spec yet. 
Also it doesn't check all the elements of the matrix.\n      it \"should correctly factorize a matrix\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        a = @m.factorize_lu\n        expect(a).to be_within(@err).of(NMatrix.new([3,4], GETRF_SOLUTION_ARRAY, dtype: dtype))\n      end\n\n      it \"also returns the permutation matrix\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n\n        a, p = @m.factorize_lu perm_matrix: true\n\n        expect(a).to be_within(@err).of(NMatrix.new([3,4], GETRF_SOLUTION_ARRAY, dtype: dtype))\n\n        p_true = NMatrix.new([3,3], [0,0,1,1,0,0,0,1,0], dtype: dtype)\n        expect(p).to eq(p_true)\n      end\n    end\n  end\n\n  NON_INTEGER_DTYPES.each do |dtype|\n    context dtype do\n\n      it \"calculates cholesky decomposition using potrf (lower)\" do\n        #a = NMatrix.new([3,3],[1,1,1, 1,2,2, 1,2,6], dtype: dtype)\n        # We use the matrix\n        # 1 1 1\n        # 1 2 2\n        # 1 2 6\n        # which is symmetric and positive-definite as required, but\n        # we need only store the lower-half of the matrix.\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new([3,3],[1,0,0, 1,2,0, 1,2,6], dtype: dtype)\n        begin\n          r = a.potrf!(:lower)\n\n          b = NMatrix.new([3,3],[1,0,0, 1,1,0, 1,1,2], dtype: dtype)\n          expect(a).to eq(b)\n          expect(r).to eq(b)\n        rescue NotImplementedError\n          pending \"potrf! 
not implemented without plugins\"\n        end\n      end\n\n      it \"calculates cholesky decomposition using potrf (upper)\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n\n        a = NMatrix.new([3,3],[1,1,1, 0,2,2, 0,0,6], dtype: dtype)\n        begin\n          r = a.potrf!(:upper)\n\n          b = NMatrix.new([3,3],[1,1,1, 0,1,1, 0,0,2], dtype: dtype)\n          expect(a).to eq(b)\n          expect(r).to eq(b)\n        rescue NotImplementedError\n          pending \"potrf! not implemented without plugins\"\n        end\n      end\n\n      it \"calculates cholesky decomposition using #factorize_cholesky\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new([3,3],[1,2,1, 2,13,5, 1,5,6], dtype: dtype)\n        begin\n          u,l = a.factorize_cholesky\n\n          l_true = NMatrix.new([3,3],[1,0,0, 2,3,0, 1,1,2], dtype: dtype)\n          u_true = l_true.transpose\n          expect(u).to eq(u_true)\n          expect(l).to eq(l_true)\n        rescue NotImplementedError\n          pending \"potrf! 
not implemented without plugins\"\n        end\n      end\n    end\n  end\n\n  NON_INTEGER_DTYPES.each do |dtype|\n    context dtype do\n\n      it \"calculates QR decomposition using factorize_qr for a square matrix\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new(3, [12.0, -51.0,   4.0,\n                             6.0, 167.0, -68.0,\n                            -4.0,  24.0, -41.0] , dtype: dtype)\n\n        q_solution = NMatrix.new([3,3], Q_SOLUTION_ARRAY_2, dtype: dtype)\n\n        r_solution = NMatrix.new([3,3], [-14.0, -21.0, 14,\n                                           0.0,  -175, 70,\n                                           0.0, 0.0,  -35] , dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-13\n              end\n\n        begin\n          q,r = a.factorize_qr\n\n          expect(q).to be_within(err).of(q_solution)\n          expect(r).to be_within(err).of(r_solution)\n\n        rescue NotImplementedError\n          pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n        end\n      end\n\n      it \"calculates QR decomposition using factorize_qr for a tall and narrow rectangular matrix\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n\n        a = NMatrix.new([4,2], [34.0, 21.0,\n                                23.0, 53.0,\n                                26.0, 346.0,\n                                23.0, 121.0] , dtype: dtype)\n\n        q_solution = NMatrix.new([4,4], Q_SOLUTION_ARRAY_1, dtype: dtype)\n\n        r_solution = NMatrix.new([4,2], [-53.75872022286244, -255.06559574252242,\n                                                        0.0,  269.34836526051555,\n                                                       
 0.0,                 0.0,\n                                                        0.0,                 0.0] , dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-13\n              end\n\n        begin\n          q,r = a.factorize_qr\n\n          expect(q).to be_within(err).of(q_solution)\n          expect(r).to be_within(err).of(r_solution)\n\n        rescue NotImplementedError\n          pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n        end\n      end\n\n      it \"calculates QR decomposition using factorize_qr for a short and wide rectangular matrix\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n\n        a = NMatrix.new([3,4], [123,31,57,81,92,14,17,36,42,34,11,28], dtype: dtype)\n\n        q_solution = NMatrix.new([3,3], Q_SOLUTION_ARRAY_3, dtype: dtype)\n\n        r_solution = NMatrix.new([3,4], R_SOLUTION_ARRAY, dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-13\n              end\n\n        begin\n          q,r = a.factorize_qr\n\n          expect(q).to be_within(err).of(q_solution)\n          expect(r).to be_within(err).of(r_solution)\n\n        rescue NotImplementedError\n          pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n        end\n      end\n\n      it \"calculates QR decomposition such that A - QR ~ 0\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new([3,3], [ 9.0,  0.0, 26.0,\n                                12.0,  0.0, -7.0,\n                                 0.0,  4.0,  0.0] , dtype: dtype)\n\n        err = case dtype\n                when 
:float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-13\n              end\n\n        begin\n          q,r = a.factorize_qr\n          a_expected = q.dot(r)\n\n          expect(a_expected).to be_within(err).of(a)\n\n        rescue NotImplementedError\n          pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n        end\n      end\n\n\n      it \"calculates the orthogonal matrix Q in QR decomposition\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = N.new([2,2], [34.0, 21, 23, 53] , dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-13\n              end\n\n        begin\n          q,r = a.factorize_qr\n\n          #Q is orthogonal if Q x Q.transpose = I\n          product = q.dot(q.transpose)\n\n          expect(product[0,0]).to be_within(err).of(1)\n          expect(product[1,0]).to be_within(err).of(0)\n          expect(product[0,1]).to be_within(err).of(0)\n          expect(product[1,1]).to be_within(err).of(1)\n\n        rescue NotImplementedError\n          pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n        end\n      end\n    end\n  end\n\n  ALL_DTYPES.each do |dtype|\n    next if dtype == :byte #doesn't work for unsigned types\n\n    context dtype do\n      err = case dtype\n              when :float32, :complex64\n                1e-4\n              else #integer matrices will return :float64\n                1e-13\n            end\n\n      it \"should correctly invert a matrix in place (bang)\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new(:dense, 5, [1, 8,-9, 7, 5,\n                                    0, 1, 0, 4, 4,\n                                    0, 0, 1, 2, 5,\n                 
                   0, 0, 0, 1,-5,\n                                    0, 0, 0, 0, 1 ], dtype)\n        b = NMatrix.new(:dense, 5, [1,-8, 9, 7, 17,\n                                    0, 1, 0,-4,-24,\n                                    0, 0, 1,-2,-15,\n                                    0, 0, 0, 1,  5,\n                                    0, 0, 0, 0,  1,], dtype)\n        if a.integer_dtype?\n          expect{a.invert!}.to raise_error(DataTypeError)\n        else\n          #should return inverse as well as modifying a\n          r = a.invert!\n          expect(a).to be_within(err).of(b)\n          expect(r).to be_within(err).of(b)\n        end\n      end\n\n\n      it \"should correctly invert a dense matrix out-of-place\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new(:dense, 3, [1,2,3,0,1,4,5,6,0], dtype)\n\n        if a.integer_dtype?\n          b = NMatrix.new(:dense, 3, [-24,18,5,20,-15,-4,-5,4,1], :float64)\n        else\n          b = NMatrix.new(:dense, 3, [-24,18,5,20,-15,-4,-5,4,1], dtype)\n        end\n\n        expect(a.invert).to be_within(err).of(b)\n      end\n\n      it \"should correctly find exact inverse\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        a = NMatrix.new(:dense, 3, [1,2,3,0,1,4,5,6,0], dtype)\n        b = NMatrix.new(:dense, 3, [-24,18,5,20,-15,-4,-5,4,1], dtype)\n\n        expect(a.exact_inverse).to be_within(err).of(b)\n      end\n\n      it \"should correctly find exact inverse\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        a = NMatrix.new(:dense, 2, [1,3,3,8], dtype)\n        b = NMatrix.new(:dense, 2, [-8,3,3,-1], dtype)\n\n        expect(a.exact_inverse).to be_within(err).of(b)\n      end\n    end\n  end\n\n  NON_INTEGER_DTYPES.each do |dtype|\n    context dtype do\n      err = Complex(1e-3, 1e-3)\n      it \"should correctly invert a 2x2 matrix\" do\n        pending(\"not yet implemented for 
NMatrix-JRuby\") if jruby?\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        if dtype == :complex64 || dtype == :complex128\n          a = NMatrix.new([2, 2], [Complex(16, 81), Complex(91, 51), \\\n                                   Complex(13, 54), Complex(71, 24)], dtype: dtype)\n          b = NMatrix.identity(2, dtype: dtype)\n\n          begin\n            expect(a.dot(a.pinv)).to be_within(err).of(b)\n          rescue NotImplementedError\n            pending \"Suppressing a NotImplementedError when the atlas plugin is not available\"\n          end\n\n        else\n          a = NMatrix.new([2, 2], [141, 612, 9123, 654], dtype: dtype)\n          b = NMatrix.identity(2, dtype: dtype)\n\n          begin\n            expect(a.dot(a.pinv)).to be_within(err).of(b)\n          rescue NotImplementedError\n            pending \"Suppressing a NotImplementedError when the atlas plugin is not available\"\n          end\n        end\n      end\n\n      it \"should verify a.dot(b.dot(a)) == a and b.dot(a.dot(b)) == b\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        if dtype == :complex64 || dtype == :complex128\n          a = NMatrix.new([3, 2], [Complex(94, 11), Complex(87, 51), Complex(82, 39), \\\n                                   Complex(45, 16), Complex(25, 32), Complex(91, 43) ], dtype: dtype)\n\n          begin\n            b = a.pinv # pseudo inverse\n            expect(a.dot(b.dot(a))).to be_within(err).of(a)\n            expect(b.dot(a.dot(b))).to be_within(err).of(b)\n          rescue NotImplementedError\n            pending \"Suppressing a NotImplementedError when the atlas plugin is not available\"\n          end\n\n        else\n          a = NMatrix.new([3, 3], [9, 4, 52, 12, 52, 1, 3, 55, 6], dtype: dtype)\n\n          begin\n            b = a.pinv # pseudo inverse\n            expect(a.dot(b.dot(a))).to 
be_within(err).of(a)\n            expect(b.dot(a.dot(b))).to be_within(err).of(b)\n          rescue NotImplementedError\n            pending \"Suppressing a NotImplementedError when the atlas plugin is not available\"\n          end\n        end\n      end\n    end\n  end\n\n\n  ALL_DTYPES.each do |dtype|\n    next if dtype == :byte #doesn't work for unsigned types\n\n    context dtype do\n      err = case dtype\n              when :float32, :complex64\n                1e-4\n              else #integer matrices will return :float64\n                1e-13\n            end\n\n      it \"should correctly find adjugate a matrix in place (bang)\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new(:dense, 2, [2, 3, 3, 5], dtype)\n        b = NMatrix.new(:dense, 2, [5, -3, -3, 2], dtype)\n\n        if a.integer_dtype?\n          expect{a.adjugate!}.to raise_error(DataTypeError)\n        else\n          #should return adjugate as well as modifying a\n          r = a.adjugate!\n          expect(a).to be_within(err).of(b)\n          expect(r).to be_within(err).of(b)\n        end\n      end\n\n\n      it \"should correctly find adjugate of a matrix out-of-place\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        a = NMatrix.new(:dense, 3, [-3, 2, -5, -1, 0, -2, 3, -4, 1], dtype)\n\n        if a.integer_dtype?\n          b = NMatrix.new(:dense, 3, [-8, 18, -4, -5, 12, -1, 4, -6, 2], :float64)\n        else\n          b = NMatrix.new(:dense, 3, [-8, 18, -4, -5, 12, -1, 4, -6, 2], dtype)\n        end\n\n        expect(a.adjoint).to be_within(err).of(b)\n        expect(a.adjugate).to be_within(err).of(b)\n      end\n\n    end\n  end\n\n\n  # TODO: Get it working with ROBJ too\n  [:byte,:int8,:int16,:int32,:int64,:float32,:float64].each do |left_dtype|\n    [:byte,:int8,:int16,:int32,:int64,:float32,:float64].each do |right_dtype|\n\n      # Won't work if they're both 1-byte, due to 
overflow.\n      next if [:byte,:int8].include?(left_dtype) && [:byte,:int8].include?(right_dtype)\n\n      # For now, don't bother testing int-int mult.\n      #next if [:int8,:int16,:int32,:int64].include?(left_dtype) && [:int8,:int16,:int32,:int64].include?(right_dtype)\n      it \"dense handles #{left_dtype.to_s} dot #{right_dtype.to_s} matrix multiplication\" do\n        #STDERR.puts \"dtype=#{dtype.to_s}\"\n        #STDERR.puts \"2\"\n\n        nary = if left_dtype.to_s =~ /complex/\n                 COMPLEX_MATRIX43A_ARRAY\n               else\n                 MATRIX43A_ARRAY\n               end\n\n        mary = if right_dtype.to_s =~ /complex/\n                 COMPLEX_MATRIX32A_ARRAY\n               else\n                 MATRIX32A_ARRAY\n               end\n\n        n = NMatrix.new([4,3], nary, dtype: left_dtype, stype: :dense)\n        m = NMatrix.new([3,2], mary, dtype: right_dtype, stype: :dense)\n\n        expect(m.shape[0]).to eq(3)\n        expect(m.shape[1]).to eq(2)\n        expect(m.dim).to eq(2)\n\n        expect(n.shape[0]).to eq(4)\n        expect(n.shape[1]).to eq(3)\n        expect(n.dim).to eq(2)\n\n        expect(n.shape[1]).to eq(m.shape[0])\n\n        r = n.dot m\n\n        expect(r[0,0]).to eq(273.0)\n        expect(r[0,1]).to eq(455.0)\n        expect(r[1,0]).to eq(243.0)\n        expect(r[1,1]).to eq(235.0)\n        expect(r[2,0]).to eq(244.0)\n        expect(r[2,1]).to eq(205.0)\n        expect(r[3,0]).to eq(102.0)\n        expect(r[3,1]).to eq(160.0)\n\n        #r.dtype.should == :float64 unless left_dtype == :float32 && right_dtype == :float32\n      end\n    end\n  end\n\n  [:byte,:int8,:int16,:int32,:int64,:float32,:float64].each do |left_dtype|\n    [:byte,:int8,:int16,:int32,:int64,:float32,:float64].each do |right_dtype|\n\n      # Won't work if they're both 1-byte, due to overflow.\n      next if [:byte,:int8].include?(left_dtype) && [:byte,:int8].include?(right_dtype)\n\n      it \"dense handles #{left_dtype.to_s} dot 
#{right_dtype.to_s} vector multiplication\" do\n        #STDERR.puts \"dtype=#{dtype.to_s}\"\n        #STDERR.puts \"2\"\n        n = NMatrix.new([4,3], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0], dtype: left_dtype)\n\n        m = NMatrix.new([3,1], [2.0, 1.0, 0.0], dtype: right_dtype)\n\n        expect(m.shape[0]).to eq(3)\n        expect(m.shape[1]).to eq(1)\n\n        expect(n.shape[0]).to eq(4)\n        expect(n.shape[1]).to eq(3)\n        expect(n.dim).to eq(2)\n\n        expect(n.shape[1]).to eq(m.shape[0])\n\n        r = n.dot m\n        # r.class.should == NVector\n\n        expect(r[0,0]).to eq(4)\n        expect(r[1,0]).to eq(13)\n        expect(r[2,0]).to eq(22)\n        expect(r[3,0]).to eq(31)\n\n        #r.dtype.should == :float64 unless left_dtype == :float32 && right_dtype == :float32\n      end\n    end\n  end\n\n  ALL_DTYPES.each do |dtype|\n    next if integer_dtype?(dtype)\n    context \"#cov dtype #{dtype}\" do\n      before do\n        @n = NMatrix.new( [5,3], [4.0,2.0,0.60,\n                                  4.2,2.1,0.59,\n                                  3.9,2.0,0.58,\n                                  4.3,2.1,0.62,\n                                  4.1,2.2,0.63], dtype: dtype)\n      end\n\n      it \"calculates sample covariance matrix\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and dtype == :object\n        expect(@n.cov).to be_within(0.0001).of(NMatrix.new([3,3],\n          [0.025  , 0.0075, 0.00175,\n           0.0075, 0.007 , 0.00135,\n           0.00175, 0.00135 , 0.00043 ], dtype: dtype)\n        )\n      end\n\n      it \"calculates population covariance matrix\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n        expect(@n.cov(for_sample_data: false)).to be_within(0.0001).of(NMatrix.new([3,3],\n                  [2.0000e-02, 6.0000e-03, 1.4000e-03,\n                   6.0000e-03, 5.6000e-03, 1.0800e-03,\n                   1.4000e-03, 1.0800e-03, 3.4400e-04], dtype: dtype)\n                )\n      end\n    end\n\n    context \"#corr #{dtype}\" do\n      it \"calculates the correlation matrix\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and dtype == :object\n        n = NMatrix.new([5,3], [4.0,2.0,0.60,\n                                4.2,2.1,0.59,\n                                3.9,2.0,0.58,\n                                4.3,2.1,0.62,\n                                4.1,2.2,0.63], dtype: dtype)\n        expect(n.corr).to be_within(0.001).of(NMatrix.new([3,3],\n          [1.00000, 0.56695, 0.53374,\n           0.56695, 1.00000, 0.77813,\n           0.53374, 0.77813, 1.00000], dtype: dtype))\n      end unless dtype =~ /complex/\n    end\n\n    context \"#symmetric? for #{dtype}\" do\n      it \"should return true for symmetric matrix\" do\n        n = NMatrix.new([3,3], [1.00000, 0.56695, 0.53374,\n                                0.56695, 1.00000, 0.77813,\n                                0.53374, 0.77813, 1.00000], dtype: dtype)\n        expect(n.symmetric?).to be_truthy\n      end\n    end\n\n    context \"#hermitian? 
for #{dtype}\" do\n      it \"should return true for complex hermitian or non-complex symmetric matrix\" do\n        n = NMatrix.new([3,3], [1.00000, 0.56695, 0.53374,\n                                0.56695, 1.00000, 0.77813,\n                                0.53374, 0.77813, 1.00000], dtype: dtype) unless dtype =~ /complex/\n        n = NMatrix.new([3,3], [1.1, Complex(1.2,1.3), Complex(1.4,1.5),\n                                Complex(1.2,-1.3), 1.9, Complex(1.8,1.7),\n                                Complex(1.4,-1.5), Complex(1.8,-1.7), 1.3], dtype: dtype) if dtype =~ /complex/\n        expect(n.hermitian?).to be_truthy\n      end\n    end\n\n    context \"#permute_columns for #{dtype}\" do\n      it \"check that #permute_columns works correctly by considering every premutation of a 3x3 matrix\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        n = NMatrix.new([3,3], [1,0,0,\n                                0,2,0,\n                                0,0,3], dtype: dtype)\n        expect(n.permute_columns([0,1,2], {convention: :intuitive})).to eq(NMatrix.new([3,3], [1,0,0,\n                                                                                              0,2,0,\n                                                                                              0,0,3], dtype: dtype))\n        expect(n.permute_columns([0,2,1], {convention: :intuitive})).to eq(NMatrix.new([3,3], [1,0,0,\n                                                                                              0,0,2,\n                                                                                              0,3,0], dtype: dtype))\n        expect(n.permute_columns([1,0,2], {convention: :intuitive})).to eq(NMatrix.new([3,3], [0,1,0,\n                                                                                              2,0,0,\n                                                                                              0,0,3], dtype: dtype))\n        
expect(n.permute_columns([1,2,0], {convention: :intuitive})).to eq(NMatrix.new([3,3], [0,0,1,\n                                                                                              2,0,0,\n                                                                                              0,3,0], dtype: dtype))\n        expect(n.permute_columns([2,0,1], {convention: :intuitive})).to eq(NMatrix.new([3,3], [0,1,0,\n                                                                                              0,0,2,\n                                                                                              3,0,0], dtype: dtype))\n        expect(n.permute_columns([2,1,0], {convention: :intuitive})).to eq(NMatrix.new([3,3], [0,0,1,\n                                                                                              0,2,0,\n                                                                                              3,0,0], dtype: dtype))\n        expect(n.permute_columns([0,1,2], {convention: :lapack})).to eq(NMatrix.new([3,3], [1,0,0,\n                                                                                           0,2,0,\n                                                                                           0,0,3], dtype: dtype))\n        expect(n.permute_columns([0,2,2], {convention: :lapack})).to eq(NMatrix.new([3,3], [1,0,0,\n                                                                                           0,0,2,\n                                                                                           0,3,0], dtype: dtype))\n        expect(n.permute_columns([1,1,2], {convention: :lapack})).to eq(NMatrix.new([3,3], [0,1,0,\n                                                                                           2,0,0,\n                                                                                           0,0,3], dtype: dtype))\n        expect(n.permute_columns([1,2,2], {convention: :lapack})).to eq(NMatrix.new([3,3], 
[0,0,1,\n                                                                                           2,0,0,\n                                                                                           0,3,0], dtype: dtype))\n        expect(n.permute_columns([2,2,2], {convention: :lapack})).to eq(NMatrix.new([3,3], [0,1,0,\n                                                                                           0,0,2,\n                                                                                           3,0,0], dtype: dtype))\n        expect(n.permute_columns([2,1,2], {convention: :lapack})).to eq(NMatrix.new([3,3], [0,0,1,\n                                                                                           0,2,0,\n                                                                                           3,0,0], dtype: dtype))\n      end\n      it \"additional tests for  #permute_columns with convention :intuitive\" do\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        m = NMatrix.new([1,4], [0,1,2,3], dtype: dtype)\n        perm = [1,0,3,2]\n        expect(m.permute_columns(perm, {convention: :intuitive})).to eq(NMatrix.new([1,4], perm, dtype: dtype))\n\n        m = NMatrix.new([1,5], [0,1,2,3,4], dtype: dtype)\n        perm = [1,0,4,3,2]\n        expect(m.permute_columns(perm, {convention: :intuitive})).to eq(NMatrix.new([1,5], perm, dtype: dtype))\n\n        m = NMatrix.new([1,6], [0,1,2,3,4,5], dtype: dtype)\n        perm = [2,4,1,0,5,3]\n        expect(m.permute_columns(perm, {convention: :intuitive})).to eq(NMatrix.new([1,6], perm, dtype: dtype))\n\n        m = NMatrix.new([1,7], [0,1,2,3,4,5,6], dtype: dtype)\n        perm = [1,3,5,6,0,2,4]\n        expect(m.permute_columns(perm, {convention: :intuitive})).to eq(NMatrix.new([1,7], perm, dtype: dtype))\n\n        m = NMatrix.new([1,8], [0,1,2,3,4,5,6,7], dtype: dtype)\n        perm = [6,7,5,4,1,3,0,2]\n        expect(m.permute_columns(perm, {convention: :intuitive})).to 
eq(NMatrix.new([1,8], perm, dtype: dtype))\n      end\n    end\n  end\n\n  context \"#solve\" do\n    NON_INTEGER_DTYPES.each do |dtype|\n\n      it \"solves linear equation for dtype #{dtype}\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n        a = NMatrix.new [2,2], [3,1,1,2], dtype: dtype\n        b = NMatrix.new [2,1], [9,8], dtype: dtype\n\n        expect(a.solve(b)).to eq(NMatrix.new [2,1], [2,3], dtype: dtype)\n      end\n\n      it \"solves linear equation for #{dtype} (non-symmetric matrix)\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n\n        a = NMatrix.new [3,3], [1,1,1, -1,0,1, 3,4,6], dtype: dtype\n        b = NMatrix.new [3,1], [6,2,29], dtype: dtype\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                else\n                  1e-14\n              end\n\n        expect(a.solve(b)).to be_within(err).of(NMatrix.new([3,1], [1,2,3], dtype: dtype))\n      end\n\n      it \"solves linear equation for dtype #{dtype} (non-vector rhs)\" do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n\n        a = NMatrix.new [3,3], [1,0,0, -1,0,1, 2,1,1], dtype: dtype\n        b = NMatrix.new [3,2], [1,0, 1,2, 4,2], dtype: dtype\n\n        expect(a.solve(b)).to eq(NMatrix.new [3,2], [1,0, 0,0, 2,2], dtype: dtype)\n      end\n    end\n\n    FLOAT_DTYPES.each do |dtype|\n      context \"when form: :lower_tri\" do\n        let(:a) { NMatrix.new([3,3], [1, 0, 0, 2, 0.5, 0, 3, 3, 9], dtype: dtype) }\n\n        it \"solves a lower triangular linear system A * x = b with vector b\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          b = NMatrix.new([3,1], [1,2,3], 
dtype: dtype)\n          x = a.solve(b, form: :lower_tri)\n          r = a.dot(x) - b\n          expect(r.abs.max).to be_within(1e-6).of(0.0)\n        end\n\n        it \"solves a lower triangular linear system A * X = B with narrow B\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          b = NMatrix.new([3,2], [1,2,3,4,5,6], dtype: dtype)\n          x = a.solve(b, form: :lower_tri)\n          r = (a.dot(x) - b).abs.to_flat_a\n          expect(r.max).to be_within(1e-6).of(0.0)\n        end\n\n        it \"solves a lower triangular linear system A * X = B with wide B\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          b = NMatrix.new([3,5], (1..15).to_a, dtype: dtype)\n          x = a.solve(b, form: :lower_tri)\n          r = (a.dot(x) - b).abs.to_flat_a\n          expect(r.max).to be_within(1e-6).of(0.0)\n        end\n      end\n\n      context \"when form: :upper_tri\" do\n        let(:a) { NMatrix.new([3,3], [3, 2, 1, 0, 2, 0.5, 0, 0, 9], dtype: dtype) }\n\n        it \"solves an upper triangular linear system A * x = b with vector b\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          b = NMatrix.new([3,1], [1,2,3], dtype: dtype)\n          x = a.solve(b, form: :upper_tri)\n          r = a.dot(x) - b\n          expect(r.abs.max).to be_within(1e-6).of(0.0)\n        end\n\n        it \"solves an upper triangular linear system A * X = B with narrow B\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          b = NMatrix.new([3,2], [1,2,3,4,5,6], dtype: dtype)\n          x = a.solve(b, form: :upper_tri)\n          r = (a.dot(x) - b).abs.to_flat_a\n          expect(r.max).to be_within(1e-6).of(0.0)\n        end\n\n        it \"solves an upper triangular linear system A * X = B with a wide B\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          b = NMatrix.new([3,5], (1..15).to_a, dtype: dtype)\n         
 x = a.solve(b, form: :upper_tri)\n          r = (a.dot(x) - b).abs.to_flat_a\n          expect(r.max).to be_within(1e-6).of(0.0)\n        end\n      end\n\n      context \"when form: :pos_def\" do\n        let(:a) { NMatrix.new([3,3], [4, 1, 2, 1, 5, 3, 2, 3, 6], dtype: dtype) }\n\n        it \"solves a linear system A * X = b with positive definite A and vector b\" do\n          b = NMatrix.new([3,1], [6,4,8], dtype: dtype)\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          begin\n            x = a.solve(b, form: :pos_def)\n            expect(x).to be_within(1e-6).of(NMatrix.new([3,1], [1,0,1], dtype: dtype))\n          rescue NotImplementedError\n            \"Suppressing a NotImplementedError when the lapacke or atlas plugin is not available\"\n          end\n        end\n\n        it \"solves a linear system A * X = B with positive definite A and matrix B\" do\n          b = NMatrix.new([3,2], [8,3,14,13,14,19], dtype: dtype)\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          begin\n            x = a.solve(b, form: :pos_def)\n            expect(x).to be_within(1e-6).of(NMatrix.new([3,2], [1,-1,2,1,1,3], dtype: dtype))\n          rescue NotImplementedError\n            \"Suppressing a NotImplementedError when the lapacke or atlas plugin is not available\"\n          end\n        end\n      end\n    end\n  end\n\n  context \"#least_squares\" do\n    it \"finds the least squares approximation to the equation A * X = B\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      a = NMatrix.new([3,2], [2.0, 0, -1, 1, 0, 2])\n      b = NMatrix.new([3,1], [1.0, 0, -1])\n      solution = NMatrix.new([2,1], [1.0 / 3 , -1.0 / 3], dtype: :float64)\n\n      begin\n        least_squares = a.least_squares(b)\n        expect(least_squares).to be_within(0.0001).of solution\n      rescue NotImplementedError\n        \"Suppressing a NotImplementedError when the lapacke or atlas plugin is not 
available\"\n      end\n    end\n\n    it \"finds the least squares approximation to the equation A * X = B with high tolerance\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      a = NMatrix.new([4,2], [1.0, 1, 1, 2, 1, 3,1,4])\n      b = NMatrix.new([4,1], [6.0, 5, 7, 10])\n      solution = NMatrix.new([2,1], [3.5 , 1.4], dtype: :float64)\n\n      begin\n        least_squares = a.least_squares(b, tolerance: 10e-5)\n        expect(least_squares).to be_within(0.0001).of solution\n      rescue NotImplementedError\n        \"Suppressing a NotImplementedError when the lapacke or atlas plugin is not available\"\n      end\n    end\n  end\n\n  context \"#hessenberg\" do\n    FLOAT_DTYPES.each do |dtype|\n      context dtype do\n        before do\n          @n = NMatrix.new [5,5],\n            [0, 2, 0, 1, 1,\n             2, 2, 3, 2, 2,\n             4,-3, 0, 1, 3,\n             6, 1,-6,-5, 4,\n             5, 6, 4, 1, 5], dtype: dtype\n        end\n\n        it \"transforms a matrix to Hessenberg form\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          expect(@n.hessenberg).to be_within(0.0001).of(NMatrix.new([5,5],\n            [0.00000,-1.66667, 0.79432,-0.45191,-1.54501,\n            -9.00000, 2.95062,-6.89312, 3.22250,-0.19012,\n             0.00000,-8.21682,-0.57379, 5.26966,-1.69976,\n             0.00000, 0.00000,-3.74630,-0.80893, 3.99708,\n             0.00000, 0.00000, 0.00000, 0.04102, 0.43211], dtype: dtype))\n        end\n      end\n    end\n  end\n\n  ALL_DTYPES.each do |dtype|\n    [:dense, :yale].each do |stype|\n      answer_dtype = integer_dtype?(dtype) ? 
:int64 : dtype\n      next if dtype == :byte\n\n      context \"#pow #{dtype} #{stype}\" do\n        before do\n          @n = NMatrix.new [4,4], [0, 2, 0, 1,\n                                  2, 2, 3, 2,\n                                  4,-3, 0, 1,\n                                  6, 1,-6,-5], dtype: dtype, stype: stype\n        end\n\n        it \"raises a square matrix to even power\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and dtype == :object\n          expect(@n.pow(4)).to eq(NMatrix.new([4,4], [292, 28,-63, -42,\n                                                     360, 96, 51, -14,\n                                                     448,-231,-24,-87,\n                                                   -1168, 595,234, 523],\n                                                   dtype: answer_dtype,\n                                                   stype: stype))\n        end\n\n        it \"raises a square matrix to odd power\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n          expect(@n.pow(9)).to eq(NMatrix.new([4,4],[-275128,  279917, 176127, 237451,\n                                                    -260104,  394759, 166893,  296081,\n                                                    -704824,  285700, 186411,  262002,\n                                                    3209256,-1070870,-918741,-1318584],\n                                                    dtype: answer_dtype, stype: stype))\n        end\n\n        it \"raises a square matrix to negative power\" do\n          expect(@n.pow(-3)).to be_within(0.00001).of (NMatrix.new([4,4],\n            [1.0647e-02, 4.2239e-04,-6.2281e-05, 2.7680e-03,\n            -1.6415e-02, 2.1296e-02, 1.0718e-02, 4.8589e-03,\n             8.6956e-03,-8.6569e-03, 2.8993e-02, 7.2015e-03,\n             5.0034e-02,-1.7500e-02,-3.6777e-02,-1.2128e-02], dtype: answer_dtype,\n             stype: stype))\n        end unless stype =~ /yale/ or dtype == :object or ALL_DTYPES.grep(/int/).include? dtype\n\n        it \"raises a square matrix to zero\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and dtype == :object\n          expect(@n.pow(0)).to eq(NMatrix.eye([4,4], dtype: answer_dtype,\n            stype: stype))\n        end\n\n        it \"raises a square matrix to one\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n          expect(@n.pow(1)).to eq(@n)\n        end\n      end\n    end\n  end\n\n  ALL_DTYPES.each do |dtype|\n    [:dense, :yale].each do |stype|\n      context \"#kron_prod #{dtype} #{stype}\" do\n        before do\n          @a = NMatrix.new([2,2], [1,2,\n                                   3,4], dtype: dtype, stype: stype)\n          @b = NMatrix.new([2,3], [1,1,1,\n                                   1,1,1], dtype: dtype, stype: stype)\n          @c = NMatrix.new([4,6], [1, 1, 1, 2, 2, 2,\n                                   1, 1, 1, 2, 2, 2,\n                                   3, 3, 3, 4, 4, 4,\n                                   3, 3, 3, 4, 4, 4], dtype: dtype, stype: stype)\n        end\n        it \"computes the Kronecker product of two NMatrix objects\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and dtype == :object\n          expect(@a.kron_prod(@b)).to eq(@c)\n        end\n      end\n    end\n  end\n\n  context \"determinants\" do\n    ALL_DTYPES.each do |dtype|\n      context dtype do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        before do\n          @a = NMatrix.new([2,2], [1,2,\n                                   3,4], dtype: dtype)\n          @b = NMatrix.new([3,3], [1,2,3,\n                                   5,0,1,\n                                   4,1,3], dtype: dtype)\n          @c = NMatrix.new([4,4], [1, 0, 1, 1,\n                                   1, 2, 3, 1,\n                                   3, 3, 3, 1,\n                                   1, 2, 3, 4], dtype: dtype)\n          @err = case dtype\n                  when :float32, :complex64\n                    1e-6\n                  when :float64, :complex128\n                    1e-14\n                  else\n                    1e-64 # FIXME: should be 0, but be_within(0) does not work.\n                end\n        end\n        it \"computes the determinant of 2x2 matrix\" do\n          
pending(\"not yet implemented for :object dtype\") if dtype == :object\n          expect(@a.det).to be_within(@err).of(-2)\n        end\n        it \"computes the determinant of 3x3 matrix\" do\n          pending(\"not yet implemented for :object dtype\") if dtype == :object\n          expect(@b.det).to be_within(@err).of(-8)\n        end\n        it \"computes the determinant of 4x4 matrix\" do\n          pending(\"not yet implemented for :object dtype\") if dtype == :object\n          expect(@c.det).to be_within(@err).of(-18)\n        end\n        it \"computes the exact determinant of 2x2 matrix\" do\n          pending(\"not yet implemented for :object dtype\") if dtype == :object\n          if dtype == :byte\n            expect{@a.det_exact}.to raise_error(DataTypeError)\n          else\n            pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and dtype == :object\n            expect(@a.det_exact).to be_within(@err).of(-2)\n          end\n        end\n        it \"computes the exact determinant of 3x3 matrix\" do\n          pending(\"not yet implemented for :object dtype\") if dtype == :object\n          if dtype == :byte\n            expect{@a.det_exact}.to raise_error(DataTypeError)\n          else\n            pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n            expect(@b.det_exact).to be_within(@err).of(-8)\n          end\n        end\n      end\n    end\n  end\n\n  context \"#scale and #scale!\" do\n    [:dense,:list,:yale].each do |stype|\n      ALL_DTYPES.each do |dtype|\n        context \"for #{dtype}\" do\n          before do\n            @m = NMatrix.new([3, 3], [0, 1, 2,\n                                      3, 4, 5,\n                                      6, 7, 8], stype: stype, dtype: dtype)\n          end\n\n          it \"scales the matrix by a given factor and return the result\" do\n            pending(\"not yet implemented for :object dtype\") if dtype == :object\n            if integer_dtype? dtype\n              expect{@m.scale 2.0}.to raise_error(DataTypeError)\n            else\n              pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and (dtype == :complex64 || dtype == :complex128)\n              expect(@m.scale 2.0).to eq(NMatrix.new([3, 3], [0,  2,  4,\n                                                             6,  8,  10,\n                                                             12, 14, 16], stype: stype, dtype: dtype))\n            end\n          end\n\n          it \"scales the matrix in place by a given factor\" do\n            pending(\"not yet implemented for :object dtype\") if dtype == :object\n            if dtype == :int8\n              expect{@m.scale! 2}.to raise_error(DataTypeError)\n            else\n              pending(\"not yet implemented for NMatrix-JRuby\") if jruby? and (dtype == :complex64 || dtype == :complex128)\n              @m.scale! 
2\n              expect(@m).to eq(NMatrix.new([3, 3], [0,  2,  4,\n                                                    6,  8,  10,\n                                                    12, 14, 16], stype: stype, dtype: dtype))\n            end\n          end\n        end\n      end\n    end\n  end\n  context \"matrix_norm\" do\n    ALL_DTYPES.each do |dtype|\n      context dtype do\n        pending(\"not yet implemented for :object dtype\") if dtype == :object\n        before do\n          @n = NMatrix.new([3,3], [-4,-3,-2,\n                                   -1, 0, 1,\n                                    2, 3, 4], dtype: dtype)\n\n          @matrix_norm_TOLERANCE = 1.0e-10\n        end\n\n        it \"should default to 2-matrix_norm\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          if(dtype == :byte)\n            expect{@n.matrix_norm}.to raise_error(ArgumentError)\n          else\n            begin\n              expect(@n.matrix_norm).to be_within(@matrix_norm_TOLERANCE).of(7.348469228349535)\n\n              rescue NotImplementedError\n                pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n            end\n          end\n        end\n\n        it \"should reject invalid arguments\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n\n          expect{@n.matrix_norm(0.5)}.to raise_error(ArgumentError)\n        end\n\n        it \"should calculate 1 and 2(minus) matrix_norms correctly\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          if(dtype == :byte)\n              expect{@n.matrix_norm(1)}.to raise_error(ArgumentError)\n              expect{@n.matrix_norm(-2)}.to raise_error(ArgumentError)\n              expect{@n.matrix_norm(-1)}.to raise_error(ArgumentError)\n          else\n            expect(@n.matrix_norm(1)).to eq(7)\n            begin\n\n              #FIXME: change to the correct value when overflow issue 
is resolved\n              #expect(@n.matrix_norm(-2)).to eq(1.8628605857884395e-07)\n              expect(@n.matrix_norm(-2)).to be_within(@matrix_norm_TOLERANCE).of(0.0)\n              rescue NotImplementedError\n                pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\"\n            end\n            expect(@n.matrix_norm(-1)).to eq(6)\n          end\n        end\n\n        it \"should calculate infinity matrix_norms correctly\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          if(dtype == :byte)\n            expect{@n.matrix_norm(:inf)}.to raise_error(ArgumentError)\n            expect{@n.matrix_norm(:'-inf')}.to raise_error(ArgumentError)\n          else\n            expect(@n.matrix_norm(:inf)).to eq(9)\n            expect(@n.matrix_norm(:'-inf')).to eq(2)\n          end\n        end\n\n        it \"should calculate frobenius matrix_norms correctly\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          if(dtype == :byte)\n            expect{@n.matrix_norm(:fro)}.to raise_error(ArgumentError)\n          else\n            expect(@n.matrix_norm(:fro)).to be_within(@matrix_norm_TOLERANCE).of(7.745966692414834)\n          end\n        end\n      end\n    end\n  end\n\n  context \"#positive_definite?\" do\n      it \"should return true for positive_definite? 
matrix\" do\n        n = NMatrix.new([3,3], [2, -1, -1,\n                                -1, 2, -1,\n                                -1, -1, 3])\n        expect(n.positive_definite?).to be_truthy\n      end\n  end\n  \n  context \"#svd_rank\" do \n    FLOAT_DTYPES.each do |dtype|\n      context dtype do\n        #examples from https://www.cliffsnotes.com/study-guides/algebra/linear-algebra/real-euclidean-vector-spaces/the-rank-of-a-matrix\n        it \"calculates the rank of matrix using singular value decomposition with NMatrix on rectangular matrix without tolerence\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          a = NMatrix.new([4,3],[2,-1,3, 1,0,1, 0,2,-1, 1,1,4], dtype: dtype)\n\n          begin\n            rank = a.svd_rank()\n          \n            rank_true = 3\n            expect(rank).to eq (rank_true)\n\n          rescue NotImplementedError\n            pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\" \n          end         \n        end\n\n        it \"calculates the rank of matrix using singular value decomposition with NMatrix on rectangular matrix with tolerence\" do\n        \n          a = NMatrix.new([4,3],[2,-1,3, 1,0,1, 0,2,-1, 1,1,4], dtype: dtype)\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          begin\n            rank = a.svd_rank(4)\n  \n            rank_true = 1\n            expect(rank).to eq (rank_true)\n\n          rescue NotImplementedError\n             pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\" \n          end\n        end\n\n        it \"calculates the rank of matrix using singular value decomposition with NMatrix on square matrix without tolerence\" do\n        \n          a = NMatrix.new([4,4],[1,-1,1,-1, -1,1,-1,1, 1,-1,1,-1, -1,1,-1,1], dtype: dtype)\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          begin\n            rank = a.svd_rank()\n   
       \n            rank_true = 1\n            expect(rank).to eq (rank_true)\n\n          rescue NotImplementedError\n             pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\" \n          end\n        end\n\n        it \"calculates the rank of matrix using singular value decomposition with NMatrix on square matrix with very small tolerence(for float32)\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          a = NMatrix.new([4,4],[1,-1,1,-1, -1,1,-1,1, 1,-1,1,-1, -1,1,-1,1], dtype: :float32)\n\n          begin\n            rank = a.svd_rank(1.7881389169360773e-08)\n          \n            rank_true = 2\n            expect(rank).to eq (rank_true)\n\n          rescue NotImplementedError\n             pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\" \n          end\n        end\n\n        it \"calculates the rank of matrix using singular value decomposition with NMatrix on square matrix with very small tolerence(for float64)\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          a = NMatrix.new([4,4],[1,-1,1,-1, -1,1,-1,1, 1,-1,1,-1, -1,1,-1,1], dtype: :float64)\n\n          begin\n            rank = a.svd_rank(1.7881389169360773e-08)\n          \n            rank_true = 1\n            expect(rank).to eq (rank_true)\n\n          rescue NotImplementedError\n             pending \"Suppressing a NotImplementedError when the lapacke plugin is not available\" \n          end\n        end\n\n      end\n    end \n  end \n\nend\n"
  },
  {
    "path": "spec/nmatrix_yale_resize_test_associations.yaml",
    "content": "---\n0: 0\n1: 0\n2: 1\n3: 1\n4: 2\n5: 437\n6: 2\n7: 347\n8: 3\n9: 52\n10: 3\n11: 590\n12: 3\n13: 562\n14: 562\n15: 5\n16: 5\n17: 405\n18: 603\n19: 186\n20: 7\n21: 347\n22: 7\n23: 8\n24: 497\n25: 9\n26: 570\n27: 10\n28: 10\n29: 11\n30: 11\n31: 11\n32: 12\n33: 12\n34: 13\n35: 212\n36: 248\n37: 428\n38: 458\n39: 448\n40: 428\n41: 14\n42: 14\n43: 14\n44: 14\n45: 580\n46: 15\n47: 612\n48: 453\n49: 16\n50: 328\n51: 16\n52: 41\n53: 24\n54: 616\n55: 616\n56: 616\n57: 616\n58: 616\n59: 17\n60: 165\n61: 579\n62: 19\n63: 19\n64: 19\n65: 19\n66: 19\n67: 20\n68: 20\n69: 20\n70: 572\n71: 575\n72: 21\n73: 594\n74: 22\n75: 606\n76: 22\n77: 546\n78: 23\n79: 23\n80: 515\n81: 23\n82: 24\n83: 380\n84: 25\n85: 26\n86: 26\n87: 26\n88: 26\n89: 27\n90: 27\n91: 27\n92: 27\n93: 27\n94: 27\n95: 288\n96: 27\n97: 27\n98: 28\n99: 367\n100: 29\n101: 241\n102: 241\n103: 29\n104: 30\n105: 30\n106: 30\n107: 30\n108: 30\n109: 30\n110: 30\n111: 30\n112: 30\n113: 310\n114: 30\n115: 598\n116: 30\n117: 31\n118: 31\n119: 31\n120: 175\n121: 529\n122: 31\n123: 337\n124: 31\n125: 31\n126: 31\n127: 475\n128: 31\n129: 31\n130: 31\n131: 31\n132: 31\n133: 31\n134: 96\n135: 401\n136: 31\n137: 31\n138: 470\n139: 31\n140: 31\n141: 151\n142: 32\n143: 32\n144: 32\n145: 32\n146: 32\n147: 32\n148: 32\n149: 33\n150: 33\n151: 33\n152: 278\n153: 33\n154: 34\n155: 423\n156: 34\n157: 34\n158: 458\n159: 34\n160: 549\n161: 34\n162: 151\n163: 34\n164: 34\n165: 553\n166: 34\n167: 49\n168: 34\n169: 112\n170: 34\n171: 35\n172: 35\n173: 493\n174: 551\n175: 35\n176: 35\n177: 35\n178: 520\n179: 35\n180: 578\n181: 36\n182: 36\n183: 36\n184: 36\n185: 36\n186: 36\n187: 36\n188: 36\n189: 36\n190: 36\n191: 36\n192: 36\n193: 171\n194: 519\n195: 37\n196: 38\n197: 88\n198: 195\n199: 605\n200: 39\n201: 100\n202: 40\n203: 82\n204: 465\n205: 530\n206: 322\n207: 42\n208: 42\n209: 42\n210: 42\n211: 610\n212: 503\n213: 166\n214: 44\n215: 44\n216: 263\n217: 45\n218: 558\n219: 321\n220: 45\n221: 45\n222: 45\n223: 414\n224: 
367\n225: 58\n226: 46\n227: 46\n228: 46\n229: 46\n230: 47\n231: 47\n232: 292\n233: 515\n234: 48\n235: 48\n236: 48\n237: 62\n238: 237\n239: 48\n240: 48\n241: 48\n242: 48\n243: 476\n244: 537\n245: 48\n246: 48\n247: 48\n248: 48\n249: 431\n250: 51\n251: 48\n252: 48\n253: 48\n254: 49\n255: 573\n256: 49\n257: 49\n258: 123\n259: 49\n260: 49\n261: 316\n262: 401\n263: 291\n264: 51\n265: 52\n266: 476\n267: 52\n268: 52\n269: 53\n270: 53\n271: 53\n272: 333\n273: 540\n274: 53\n275: 53\n276: 53\n277: 54\n278: 54\n279: 575\n280: 54\n281: 96\n282: 55\n283: 571\n284: 55\n285: 424\n286: 458\n287: 56\n288: 56\n289: 56\n290: 56\n291: 168\n292: 56\n293: 56\n294: 300\n295: 57\n296: 509\n297: 58\n298: 58\n299: 58\n300: 58\n301: 58\n302: 58\n303: 58\n304: 58\n305: 58\n306: 58\n307: 58\n308: 59\n309: 59\n310: 59\n311: 59\n312: 287\n313: 102\n314: 59\n315: 59\n316: 59\n317: 59\n318: 59\n319: 337\n320: 212\n321: 559\n322: 580\n323: 525\n324: 464\n325: 61\n326: 61\n327: 61\n328: 590\n329: 63\n330: 138\n331: 358\n332: 64\n333: 64\n334: 64\n335: 64\n336: 64\n337: 64\n338: 64\n339: 535\n340: 358\n341: 64\n342: 395\n343: 64\n344: 64\n345: 64\n346: 65\n347: 65\n348: 522\n349: 609\n350: 65\n351: 65\n352: 66\n353: 66\n354: 66\n355: 66\n356: 66\n357: 401\n358: 176\n359: 67\n360: 67\n361: 257\n362: 360\n363: 67\n364: 92\n365: 67\n366: 96\n367: 407\n368: 543\n369: 69\n370: 69\n371: 322\n372: 70\n373: 70\n374: 70\n375: 595\n376: 595\n377: 595\n378: 72\n379: 549\n380: 72\n381: 72\n382: 580\n383: 453\n384: 74\n385: 74\n386: 74\n387: 74\n388: 74\n389: 75\n390: 75\n391: 75\n392: 75\n393: 572\n394: 520\n395: 417\n396: 75\n397: 75\n398: 75\n399: 75\n400: 349\n401: 75\n402: 75\n403: 349\n404: 75\n405: 75\n406: 75\n407: 75\n408: 349\n409: 75\n410: 551\n411: 75\n412: 75\n413: 75\n414: 75\n415: 75\n416: 75\n417: 76\n418: 76\n419: 77\n420: 77\n421: 77\n422: 77\n423: 77\n424: 77\n425: 77\n426: 78\n427: 78\n428: 79\n429: 79\n430: 79\n431: 445\n432: 79\n433: 79\n434: 79\n435: 79\n436: 79\n437: 79\n438: 79\n439: 
79\n440: 79\n441: 79\n442: 79\n443: 80\n444: 571\n445: 570\n446: 80\n447: 80\n448: 80\n449: 80\n450: 81\n451: 81\n452: 82\n453: 82\n454: 83\n455: 83\n456: 83\n457: 196\n458: 83\n459: 364\n460: 322\n461: 612\n462: 492\n463: 83\n464: 83\n465: 448\n466: 83\n467: 515\n468: 448\n469: 341\n470: 196\n471: 83\n472: 83\n473: 521\n474: 83\n475: 83\n476: 84\n477: 84\n478: 85\n479: 85\n480: 85\n481: 598\n482: 579\n483: 577\n484: 87\n485: 88\n486: 88\n487: 88\n488: 88\n489: 88\n490: 88\n491: 88\n492: 88\n493: 88\n494: 88\n495: 88\n496: 88\n497: 88\n498: 338\n499: 89\n500: 90\n501: 516\n502: 91\n503: 403\n504: 92\n505: 93\n506: 93\n507: 93\n508: 94\n509: 333\n510: 95\n511: 95\n512: 95\n513: 211\n514: 95\n515: 95\n516: 96\n517: 318\n518: 228\n519: 96\n520: 96\n521: 526\n522: 96\n523: 96\n524: 427\n525: 96\n526: 96\n527: 96\n528: 607\n529: 96\n530: 96\n531: 534\n532: 96\n533: 96\n534: 96\n535: 96\n536: 395\n537: 96\n538: 395\n539: 96\n540: 149\n541: 534\n542: 96\n543: 583\n544: 96\n545: 96\n546: 96\n547: 96\n548: 395\n549: 96\n550: 407\n551: 96\n552: 97\n553: 97\n554: 98\n555: 98\n556: 581\n557: 431\n558: 99\n559: 100\n560: 100\n561: 100\n562: 100\n563: 100\n564: 407\n565: 100\n566: 100\n567: 100\n568: 100\n569: 374\n570: 100\n571: 100\n572: 100\n573: 100\n574: 100\n575: 100\n576: 100\n577: 100\n578: 100\n579: 100\n580: 100\n581: 100\n582: 100\n583: 100\n584: 100\n585: 100\n586: 100\n587: 100\n588: 100\n589: 100\n590: 100\n591: 100\n592: 100\n593: 464\n594: 419\n595: 248\n596: 287\n597: 102\n598: 392\n599: 102\n600: 102\n601: 102\n602: 102\n603: 102\n604: 102\n605: 102\n606: 102\n607: 102\n608: 102\n609: 103\n610: 508\n611: 420\n612: 606\n613: 466\n614: 554\n615: 104\n616: 105\n617: 105\n618: 105\n619: 106\n620: 347\n621: 107\n622: 107\n623: 107\n624: 108\n625: 108\n626: 109\n627: 615\n628: 615\n629: 594\n630: 110\n631: 110\n632: 110\n633: 110\n634: 110\n635: 110\n636: 110\n637: 110\n638: 111\n639: 112\n640: 112\n641: 112\n642: 112\n643: 112\n644: 112\n645: 112\n646: 112\n647: 
423\n648: 112\n649: 250\n650: 466\n651: 112\n652: 423\n653: 112\n654: 527\n655: 112\n656: 112\n657: 407\n658: 527\n659: 112\n660: 423\n661: 112\n662: 112\n663: 423\n664: 231\n665: 113\n666: 113\n667: 369\n668: 114\n669: 114\n670: 114\n671: 115\n672: 115\n673: 115\n674: 192\n675: 116\n676: 229\n677: 488\n678: 117\n679: 117\n680: 117\n681: 117\n682: 118\n683: 118\n684: 118\n685: 118\n686: 118\n687: 118\n688: 118\n689: 118\n690: 118\n691: 118\n692: 118\n693: 118\n694: 119\n695: 119\n696: 119\n697: 228\n698: 120\n699: 120\n700: 419\n701: 515\n702: 121\n703: 121\n704: 121\n705: 438\n706: 122\n707: 123\n708: 123\n709: 554\n710: 123\n711: 124\n712: 124\n713: 374\n714: 126\n715: 347\n716: 320\n717: 126\n718: 126\n719: 126\n720: 126\n721: 126\n722: 126\n723: 480\n724: 126\n725: 126\n726: 126\n727: 126\n728: 126\n729: 126\n730: 509\n731: 126\n732: 332\n733: 379\n734: 379\n735: 126\n736: 388\n737: 388\n738: 126\n739: 126\n740: 248\n741: 528\n742: 126\n743: 573\n744: 126\n745: 126\n746: 448\n747: 126\n748: 126\n749: 126\n750: 341\n751: 126\n752: 158\n753: 341\n754: 127\n755: 127\n756: 127\n757: 127\n758: 127\n759: 128\n760: 128\n761: 518\n762: 129\n763: 426\n764: 129\n765: 129\n766: 317\n767: 129\n768: 560\n769: 317\n770: 560\n771: 347\n772: 518\n773: 518\n774: 129\n775: 518\n776: 129\n777: 130\n778: 597\n779: 130\n780: 130\n781: 130\n782: 130\n783: 130\n784: 131\n785: 131\n786: 131\n787: 131\n788: 309\n789: 131\n790: 131\n791: 131\n792: 131\n793: 131\n794: 131\n795: 132\n796: 132\n797: 132\n798: 133\n799: 133\n800: 133\n801: 476\n802: 133\n803: 133\n804: 133\n805: 133\n806: 133\n807: 133\n808: 592\n809: 133\n810: 133\n811: 133\n812: 552\n813: 133\n814: 133\n815: 236\n816: 134\n817: 134\n818: 135\n819: 135\n820: 135\n821: 135\n822: 466\n823: 466\n824: 580\n825: 457\n826: 138\n827: 138\n828: 138\n829: 138\n830: 138\n831: 138\n832: 138\n833: 606\n834: 139\n835: 139\n836: 139\n837: 298\n838: 522\n839: 532\n840: 141\n841: 141\n842: 499\n843: 143\n844: 143\n845: 144\n846: 144\n847: 
144\n848: 144\n849: 144\n850: 347\n851: 144\n852: 454\n853: 145\n854: 146\n855: 146\n856: 147\n857: 147\n858: 148\n859: 148\n860: 149\n861: 149\n862: 149\n863: 149\n864: 149\n865: 149\n866: 149\n867: 149\n868: 149\n869: 149\n870: 149\n871: 149\n872: 149\n873: 149\n874: 149\n875: 149\n876: 149\n877: 149\n878: 149\n879: 149\n880: 149\n881: 149\n882: 149\n883: 149\n884: 149\n885: 149\n886: 149\n887: 149\n888: 597\n889: 149\n890: 149\n891: 149\n892: 149\n893: 149\n894: 149\n895: 597\n896: 149\n897: 149\n898: 597\n899: 149\n900: 149\n901: 149\n902: 149\n903: 149\n904: 149\n905: 597\n906: 149\n907: 149\n908: 149\n909: 149\n910: 149\n911: 149\n912: 149\n913: 149\n914: 149\n915: 149\n916: 149\n917: 149\n918: 149\n919: 149\n920: 169\n921: 604\n922: 149\n923: 149\n924: 149\n925: 149\n926: 190\n927: 149\n928: 149\n929: 149\n930: 149\n931: 149\n932: 149\n933: 149\n934: 149\n935: 149\n936: 149\n937: 346\n938: 149\n939: 149\n940: 563\n941: 597\n942: 149\n943: 149\n944: 149\n945: 149\n946: 149\n947: 149\n948: 149\n949: 149\n950: 150\n951: 568\n952: 151\n953: 458\n954: 151\n955: 415\n956: 152\n957: 290\n958: 153\n959: 153\n960: 153\n961: 153\n962: 153\n963: 154\n964: 154\n965: 155\n966: 155\n967: 155\n968: 156\n969: 156\n970: 156\n971: 157\n972: 157\n973: 157\n974: 157\n975: 157\n976: 157\n977: 157\n978: 157\n979: 158\n980: 158\n981: 159\n982: 458\n983: 586\n984: 586\n985: 161\n986: 162\n987: 162\n988: 163\n989: 163\n990: 163\n991: 164\n992: 579\n993: 166\n994: 166\n995: 166\n996: 166\n997: 167\n998: 167\n999: 167\n1000: 167\n1001: 167\n1002: 167\n1003: 167\n1004: 168\n1005: 168\n1006: 459\n1007: 168\n1008: 168\n1009: 168\n1010: 168\n1011: 168\n1012: 168\n1013: 168\n1014: 168\n1015: 271\n1016: 168\n1017: 168\n1018: 168\n1019: 169\n1020: 169\n1021: 288\n1022: 169\n1023: 169\n1024: 169\n1025: 169\n1026: 170\n1027: 171\n1028: 171\n1029: 171\n1030: 171\n1031: 445\n1032: 171\n1033: 171\n1034: 445\n1035: 171\n1036: 551\n1037: 172\n1038: 173\n1039: 173\n1040: 174\n1041: 502\n1042: 
176\n1043: 176\n1044: 176\n1045: 176\n1046: 176\n1047: 177\n1048: 177\n1049: 177\n1050: 178\n1051: 178\n1052: 179\n1053: 179\n1054: 180\n1055: 180\n1056: 181\n1057: 181\n1058: 182\n1059: 182\n1060: 183\n1061: 183\n1062: 183\n1063: 183\n1064: 183\n1065: 183\n1066: 395\n1067: 183\n1068: 183\n1069: 183\n1070: 183\n1071: 184\n1072: 310\n1073: 405\n1074: 185\n1075: 185\n1076: 185\n1077: 185\n1078: 185\n1079: 185\n1080: 185\n1081: 185\n1082: 185\n1083: 185\n1084: 185\n1085: 185\n1086: 185\n1087: 185\n1088: 185\n1089: 185\n1090: 185\n1091: 185\n1092: 220\n1093: 185\n1094: 185\n1095: 185\n1096: 185\n1097: 424\n1098: 185\n1099: 367\n1100: 185\n1101: 185\n1102: 185\n1103: 185\n1104: 185\n1105: 185\n1106: 185\n1107: 402\n1108: 185\n1109: 185\n1110: 185\n1111: 377\n1112: 187\n1113: 185\n1114: 185\n1115: 185\n1116: 185\n1117: 298\n1118: 185\n1119: 402\n1120: 185\n1121: 185\n1122: 185\n1123: 185\n1124: 185\n1125: 186\n1126: 558\n1127: 249\n1128: 249\n1129: 186\n1130: 187\n1131: 187\n1132: 187\n1133: 187\n1134: 187\n1135: 187\n1136: 576\n1137: 576\n1138: 189\n1139: 189\n1140: 189\n1141: 576\n1142: 190\n1143: 190\n1144: 604\n1145: 191\n1146: 191\n1147: 191\n1148: 191\n1149: 337\n1150: 192\n1151: 192\n1152: 193\n1153: 193\n1154: 193\n1155: 194\n1156: 367\n1157: 194\n1158: 194\n1159: 195\n1160: 195\n1161: 195\n1162: 195\n1163: 196\n1164: 196\n1165: 196\n1166: 196\n1167: 196\n1168: 196\n1169: 196\n1170: 196\n1171: 196\n1172: 196\n1173: 196\n1174: 196\n1175: 197\n1176: 197\n1177: 198\n1178: 198\n1179: 198\n1180: 198\n1181: 198\n1182: 199\n1183: 199\n1184: 199\n1185: 199\n1186: 199\n1187: 199\n1188: 200\n1189: 200\n1190: 200\n1191: 200\n1192: 429\n1193: 201\n1194: 201\n1195: 202\n1196: 591\n1197: 518\n1198: 203\n1199: 612\n1200: 204\n1201: 204\n1202: 205\n1203: 205\n1204: 206\n1205: 206\n1206: 207\n1207: 207\n1208: 208\n1209: 208\n1210: 209\n1211: 209\n1212: 210\n1213: 210\n1214: 210\n1215: 545\n1216: 211\n1217: 211\n1218: 212\n1219: 212\n1220: 492\n1221: 428\n1222: 471\n1223: 
213\n1224: 214\n1225: 368\n1226: 215\n1227: 215\n1228: 217\n1229: 578\n1230: 437\n1231: 464\n1232: 218\n1233: 218\n1234: 218\n1235: 218\n1236: 218\n1237: 218\n1238: 219\n1239: 218\n1240: 218\n1241: 218\n1242: 218\n1243: 219\n1244: 220\n1245: 220\n1246: 220\n1247: 220\n1248: 360\n1249: 221\n1250: 221\n1251: 222\n1252: 222\n1253: 222\n1254: 223\n1255: 223\n1256: 224\n1257: 225\n1258: 225\n1259: 225\n1260: 225\n1261: 226\n1262: 226\n1263: 227\n1264: 227\n1265: 227\n1266: 227\n1267: 228\n1268: 228\n1269: 228\n1270: 228\n1271: 228\n1272: 228\n1273: 228\n1274: 228\n1275: 228\n1276: 229\n1277: 229\n1278: 229\n1279: 582\n1280: 230\n1281: 230\n1282: 230\n1283: 231\n1284: 232\n1285: 232\n1286: 232\n1287: 233\n1288: 233\n1289: 233\n1290: 234\n1291: 322\n1292: 234\n1293: 541\n1294: 235\n1295: 237\n1296: 522\n1297: 237\n1298: 397\n1299: 237\n1300: 247\n1301: 237\n1302: 238\n1303: 240\n1304: 240\n1305: 340\n1306: 240\n1307: 242\n1308: 242\n1309: 242\n1310: 242\n1311: 243\n1312: 243\n1313: 243\n1314: 243\n1315: 243\n1316: 243\n1317: 243\n1318: 401\n1319: 243\n1320: 243\n1321: 244\n1322: 245\n1323: 245\n1324: 341\n1325: 245\n1326: 579\n1327: 247\n1328: 247\n1329: 539\n1330: 248\n1331: 248\n1332: 250\n1333: 250\n1334: 250\n1335: 250\n1336: 251\n1337: 251\n1338: 515\n1339: 251\n1340: 251\n1341: 251\n1342: 251\n1343: 251\n1344: 252\n1345: 252\n1346: 253\n1347: 253\n1348: 254\n1349: 254\n1350: 254\n1351: 255\n1352: 255\n1353: 255\n1354: 255\n1355: 255\n1356: 255\n1357: 255\n1358: 255\n1359: 255\n1360: 255\n1361: 330\n1362: 255\n1363: 256\n1364: 256\n1365: 256\n1366: 606\n1367: 274\n1368: 474\n1369: 257\n1370: 257\n1371: 257\n1372: 288\n1373: 257\n1374: 257\n1375: 419\n1376: 372\n1377: 258\n1378: 258\n1379: 554\n1380: 259\n1381: 259\n1382: 260\n1383: 260\n1384: 261\n1385: 261\n1386: 261\n1387: 486\n1388: 262\n1389: 262\n1390: 263\n1391: 263\n1392: 263\n1393: 263\n1394: 335\n1395: 263\n1396: 278\n1397: 264\n1398: 264\n1399: 383\n1400: 264\n1401: 264\n1402: 265\n1403: 265\n1404: 
265\n1405: 265\n1406: 343\n1407: 265\n1408: 280\n1409: 266\n1410: 266\n1411: 266\n1412: 267\n1413: 267\n1414: 592\n1415: 269\n1416: 269\n1417: 279\n1418: 270\n1419: 270\n1420: 271\n1421: 271\n1422: 271\n1423: 271\n1424: 272\n1425: 272\n1426: 273\n1427: 515\n1428: 349\n1429: 274\n1430: 274\n1431: 508\n1432: 274\n1433: 274\n1434: 274\n1435: 274\n1436: 274\n1437: 274\n1438: 274\n1439: 274\n1440: 274\n1441: 274\n1442: 274\n1443: 274\n1444: 274\n1445: 274\n1446: 508\n1447: 274\n1448: 496\n1449: 274\n1450: 580\n1451: 276\n1452: 276\n1453: 277\n1454: 518\n1455: 277\n1456: 277\n1457: 278\n1458: 279\n1459: 280\n1460: 485\n1461: 281\n1462: 281\n1463: 281\n1464: 281\n1465: 281\n1466: 307\n1467: 281\n1468: 352\n1469: 409\n1470: 282\n1471: 283\n1472: 283\n1473: 283\n1474: 283\n1475: 283\n1476: 284\n1477: 284\n1478: 284\n1479: 284\n1480: 284\n1481: 285\n1482: 592\n1483: 285\n1484: 285\n1485: 285\n1486: 285\n1487: 285\n1488: 286\n1489: 553\n1490: 287\n1491: 287\n1492: 287\n1493: 287\n1494: 287\n1495: 288\n1496: 288\n1497: 288\n1498: 288\n1499: 288\n1500: 288\n1501: 289\n1502: 596\n1503: 290\n1504: 290\n1505: 290\n1506: 290\n1507: 290\n1508: 290\n1509: 290\n1510: 290\n1511: 290\n1512: 290\n1513: 290\n1514: 291\n1515: 291\n1516: 341\n1517: 293\n1518: 293\n1519: 293\n1520: 293\n1521: 293\n1522: 293\n1523: 293\n1524: 293\n1525: 293\n1526: 293\n1527: 539\n1528: 293\n1529: 293\n1530: 294\n1531: 294\n1532: 295\n1533: 295\n1534: 295\n1535: 296\n1536: 296\n1537: 296\n1538: 296\n1539: 296\n1540: 296\n1541: 296\n1542: 296\n1543: 296\n1544: 296\n1545: 296\n1546: 296\n1547: 296\n1548: 296\n1549: 296\n1550: 296\n1551: 296\n1552: 296\n1553: 296\n1554: 572\n1555: 296\n1556: 296\n1557: 296\n1558: 360\n1559: 459\n1560: 299\n1561: 299\n1562: 299\n1563: 300\n1564: 302\n1565: 302\n1566: 302\n1567: 333\n1568: 305\n1569: 305\n1570: 358\n1571: 358\n1572: 535\n1573: 480\n1574: 480\n1575: 307\n1576: 307\n1577: 309\n1578: 309\n1579: 309\n1580: 310\n1581: 310\n1582: 310\n1583: 311\n1584: 311\n1585: 
312\n1586: 313\n1587: 314\n1588: 314\n1589: 314\n1590: 315\n1591: 315\n1592: 316\n1593: 317\n1594: 317\n1595: 518\n1596: 317\n1597: 518\n1598: 518\n1599: 317\n1600: 317\n1601: 518\n1602: 318\n1603: 506\n1604: 318\n1605: 380\n1606: 380\n1607: 318\n1608: 380\n1609: 355\n1610: 318\n1611: 318\n1612: 318\n1613: 318\n1614: 319\n1615: 319\n1616: 500\n1617: 320\n1618: 320\n1619: 321\n1620: 321\n1621: 322\n1622: 322\n1623: 322\n1624: 322\n1625: 322\n1626: 322\n1627: 322\n1628: 322\n1629: 322\n1630: 322\n1631: 322\n1632: 322\n1633: 322\n1634: 322\n1635: 322\n1636: 322\n1637: 322\n1638: 322\n1639: 429\n1640: 322\n1641: 322\n1642: 322\n1643: 322\n1644: 322\n1645: 322\n1646: 322\n1647: 322\n1648: 322\n1649: 448\n1650: 322\n1651: 329\n1652: 522\n1653: 322\n1654: 322\n1655: 322\n1656: 322\n1657: 322\n1658: 322\n1659: 322\n1660: 322\n1661: 322\n1662: 322\n1663: 322\n1664: 322\n1665: 322\n1666: 322\n1667: 322\n1668: 322\n1669: 322\n1670: 322\n1671: 322\n1672: 322\n1673: 322\n1674: 322\n1675: 322\n1676: 322\n1677: 549\n1678: 323\n1679: 323\n1680: 323\n1681: 323\n1682: 449\n1683: 324\n1684: 324\n1685: 449\n1686: 449\n1687: 324\n1688: 324\n1689: 325\n1690: 453\n1691: 496\n1692: 496\n1693: 327\n1694: 327\n1695: 423\n1696: 327\n1697: 328\n1698: 329\n1699: 329\n1700: 566\n1701: 566\n1702: 331\n1703: 331\n1704: 332\n1705: 540\n1706: 540\n1707: 333\n1708: 333\n1709: 334\n1710: 334\n1711: 335\n1712: 336\n1713: 563\n1714: 336\n1715: 336\n1716: 442\n1717: 424\n1718: 337\n1719: 337\n1720: 337\n1721: 461\n1722: 337\n1723: 337\n1724: 337\n1725: 337\n1726: 337\n1727: 338\n1728: 600\n1729: 591\n1730: 340\n1731: 340\n1732: 341\n1733: 341\n1734: 492\n1735: 341\n1736: 341\n1737: 342\n1738: 342\n1739: 453\n1740: 509\n1741: 343\n1742: 344\n1743: 345\n1744: 345\n1745: 347\n1746: 347\n1747: 347\n1748: 347\n1749: 347\n1750: 347\n1751: 347\n1752: 560\n1753: 347\n1754: 347\n1755: 347\n1756: 603\n1757: 348\n1758: 348\n1759: 348\n1760: 348\n1761: 348\n1762: 348\n1763: 348\n1764: 348\n1765: 348\n1766: 
348\n1767: 348\n1768: 348\n1769: 572\n1770: 590\n1771: 349\n1772: 349\n1773: 349\n1774: 350\n1775: 350\n1776: 350\n1777: 351\n1778: 351\n1779: 351\n1780: 352\n1781: 352\n1782: 352\n1783: 352\n1784: 352\n1785: 353\n1786: 354\n1787: 354\n1788: 355\n1789: 355\n1790: 355\n1791: 357\n1792: 357\n1793: 357\n1794: 359\n1795: 359\n1796: 360\n1797: 361\n1798: 361\n1799: 362\n1800: 362\n1801: 363\n1802: 363\n1803: 364\n1804: 364\n1805: 364\n1806: 574\n1807: 364\n1808: 364\n1809: 606\n1810: 365\n1811: 366\n1812: 366\n1813: 422\n1814: 366\n1815: 367\n1816: 367\n1817: 367\n1818: 367\n1819: 367\n1820: 367\n1821: 367\n1822: 367\n1823: 367\n1824: 367\n1825: 367\n1826: 367\n1827: 367\n1828: 367\n1829: 367\n1830: 367\n1831: 367\n1832: 367\n1833: 367\n1834: 367\n1835: 367\n1836: 367\n1837: 367\n1838: 367\n1839: 367\n1840: 367\n1841: 367\n1842: 367\n1843: 367\n1844: 367\n1845: 367\n1846: 367\n1847: 367\n1848: 367\n1849: 367\n1850: 367\n1851: 367\n1852: 367\n1853: 367\n1854: 367\n1855: 367\n1856: 367\n1857: 367\n1858: 367\n1859: 367\n1860: 367\n1861: 367\n1862: 367\n1863: 367\n1864: 367\n1865: 367\n1866: 367\n1867: 367\n1868: 367\n1869: 367\n1870: 367\n1871: 367\n1872: 367\n1873: 367\n1874: 367\n1875: 367\n1876: 367\n1877: 367\n1878: 367\n1879: 367\n1880: 367\n1881: 367\n1882: 367\n1883: 367\n1884: 367\n1885: 367\n1886: 367\n1887: 367\n1888: 367\n1889: 367\n1890: 367\n1891: 367\n1892: 367\n1893: 367\n1894: 367\n1895: 367\n1896: 367\n1897: 367\n1898: 367\n1899: 368\n1900: 369\n1901: 558\n1902: 371\n1903: 371\n1904: 371\n1905: 371\n1906: 371\n1907: 372\n1908: 372\n1909: 372\n1910: 372\n1911: 372\n1912: 372\n1913: 373\n1914: 373\n1915: 373\n1916: 373\n1917: 373\n1918: 373\n1919: 373\n1920: 373\n1921: 373\n1922: 373\n1923: 374\n1924: 374\n1925: 374\n1926: 438\n1927: 374\n1928: 374\n1929: 437\n1930: 374\n1931: 374\n1932: 374\n1933: 374\n1934: 374\n1935: 374\n1936: 374\n1937: 374\n1938: 374\n1939: 374\n1940: 375\n1941: 375\n1942: 376\n1943: 376\n1944: 377\n1945: 377\n1946: 377\n1947: 
377\n1948: 377\n1949: 377\n1950: 377\n1951: 377\n1952: 377\n1953: 377\n1954: 377\n1955: 378\n1956: 378\n1957: 378\n1958: 571\n1959: 378\n1960: 378\n1961: 378\n1962: 378\n1963: 380\n1964: 380\n1965: 380\n1966: 380\n1967: 380\n1968: 380\n1969: 380\n1970: 380\n1971: 380\n1972: 380\n1973: 380\n1974: 380\n1975: 380\n1976: 380\n1977: 380\n1978: 554\n1979: 380\n1980: 380\n1981: 381\n1982: 381\n1983: 381\n1984: 382\n1985: 382\n1986: 383\n1987: 383\n1988: 384\n1989: 384\n1990: 384\n1991: 385\n1992: 385\n1993: 385\n1994: 386\n1995: 386\n1996: 387\n1997: 387\n1998: 387\n1999: 389\n2000: 390\n2001: 390\n2002: 396\n2003: 396\n2004: 396\n2005: 392\n2006: 539\n2007: 392\n2008: 532\n2009: 392\n2010: 392\n2011: 393\n2012: 393\n2013: 394\n2014: 607\n2015: 395\n2016: 395\n2017: 395\n2018: 395\n2019: 407\n2020: 395\n2021: 395\n2022: 395\n2023: 407\n2024: 607\n2025: 607\n2026: 395\n2027: 395\n2028: 395\n2029: 396\n2030: 396\n2031: 396\n2032: 396\n2033: 396\n2034: 396\n2035: 396\n2036: 396\n2037: 397\n2038: 397\n2039: 397\n2040: 397\n2041: 397\n2042: 397\n2043: 399\n2044: 399\n2045: 399\n2046: 400\n2047: 400\n2048: 401\n2049: 401\n2050: 401\n2051: 579\n2052: 401\n2053: 401\n2054: 401\n2055: 401\n2056: 401\n2057: 402\n2058: 403\n2059: 404\n2060: 404\n2061: 578\n2062: 404\n2063: 404\n2064: 404\n2065: 406\n2066: 574\n2067: 407\n2068: 407\n2069: 407\n2070: 407\n2071: 407\n2072: 407\n2073: 407\n2074: 407\n2075: 407\n2076: 534\n2077: 407\n2078: 408\n2079: 408\n2080: 408\n2081: 408\n2082: 408\n2083: 408\n2084: 408\n2085: 408\n2086: 408\n2087: 408\n2088: 408\n2089: 408\n2090: 408\n2091: 408\n2092: 409\n2093: 409\n2094: 409\n2095: 410\n2096: 410\n2097: 411\n2098: 411\n2099: 411\n2100: 411\n2101: 411\n2102: 412\n2103: 413\n2104: 413\n2105: 414\n2106: 414\n2107: 414\n2108: 414\n2109: 415\n2110: 415\n2111: 415\n2112: 508\n2113: 416\n2114: 416\n2115: 535\n2116: 416\n2117: 417\n2118: 417\n2119: 417\n2120: 418\n2121: 418\n2122: 418\n2123: 418\n2124: 419\n2125: 420\n2126: 420\n2127: 420\n2128: 
421\n2129: 421\n2130: 492\n2131: 422\n2132: 423\n2133: 423\n2134: 423\n2135: 423\n2136: 496\n2137: 423\n2138: 423\n2139: 423\n2140: 423\n2141: 424\n2142: 425\n2143: 425\n2144: 425\n2145: 518\n2146: 518\n2147: 426\n2148: 426\n2149: 426\n2150: 426\n2151: 427\n2152: 428\n2153: 428\n2154: 429\n2155: 430\n2156: 430\n2157: 430\n2158: 430\n2159: 430\n2160: 431\n2161: 431\n2162: 431\n2163: 431\n2164: 431\n2165: 441\n2166: 431\n2167: 431\n2168: 431\n2169: 431\n2170: 431\n2171: 431\n2172: 431\n2173: 431\n2174: 431\n2175: 431\n2176: 431\n2177: 431\n2178: 431\n2179: 431\n2180: 431\n2181: 431\n2182: 431\n2183: 431\n2184: 431\n2185: 432\n2186: 546\n2187: 433\n2188: 434\n2189: 518\n2190: 539\n2191: 539\n2192: 539\n2193: 436\n2194: 436\n2195: 508\n2196: 437\n2197: 437\n2198: 437\n2199: 437\n2200: 438\n2201: 438\n2202: 438\n2203: 555\n2204: 617\n2205: 584\n2206: 438\n2207: 438\n2208: 439\n2209: 439\n2210: 440\n2211: 442\n2212: 442\n2213: 442\n2214: 443\n2215: 443\n2216: 444\n2217: 451\n2218: 444\n2219: 444\n2220: 444\n2221: 444\n2222: 444\n2223: 445\n2224: 445\n2225: 445\n2226: 445\n2227: 524\n2228: 445\n2229: 447\n2230: 447\n2231: 447\n2232: 447\n2233: 447\n2234: 447\n2235: 447\n2236: 448\n2237: 448\n2238: 487\n2239: 448\n2240: 448\n2241: 448\n2242: 448\n2243: 452\n2244: 450\n2245: 450\n2246: 450\n2247: 451\n2248: 451\n2249: 451\n2250: 451\n2251: 452\n2252: 452\n2253: 453\n2254: 453\n2255: 453\n2256: 453\n2257: 453\n2258: 453\n2259: 454\n2260: 454\n2261: 454\n2262: 454\n2263: 515\n2264: 455\n2265: 455\n2266: 455\n2267: 456\n2268: 457\n2269: 458\n2270: 458\n2271: 458\n2272: 458\n2273: 458\n2274: 458\n2275: 458\n2276: 458\n2277: 458\n2278: 458\n2279: 458\n2280: 458\n2281: 458\n2282: 458\n2283: 458\n2284: 459\n2285: 459\n2286: 459\n2287: 459\n2288: 460\n2289: 460\n2290: 461\n2291: 462\n2292: 462\n2293: 462\n2294: 463\n2295: 463\n2296: 509\n2297: 467\n2298: 467\n2299: 468\n2300: 468\n2301: 469\n2302: 469\n2303: 469\n2304: 472\n2305: 472\n2306: 473\n2307: 473\n2308: 474\n2309: 
474\n2310: 474\n2311: 474\n2312: 474\n2313: 474\n2314: 474\n2315: 598\n2316: 475\n2317: 476\n2318: 477\n2319: 477\n2320: 477\n2321: 477\n2322: 477\n2323: 477\n2324: 477\n2325: 479\n2326: 479\n2327: 481\n2328: 481\n2329: 481\n2330: 481\n2331: 482\n2332: 482\n2333: 482\n2334: 482\n2335: 482\n2336: 483\n2337: 483\n2338: 483\n2339: 483\n2340: 484\n2341: 484\n2342: 485\n2343: 486\n2344: 486\n2345: 486\n2346: 486\n2347: 487\n2348: 487\n2349: 487\n2350: 487\n2351: 487\n2352: 488\n2353: 489\n2354: 554\n2355: 490\n2356: 490\n2357: 491\n2358: 491\n2359: 492\n2360: 492\n2361: 492\n2362: 492\n2363: 492\n2364: 492\n2365: 492\n2366: 492\n2367: 492\n2368: 492\n2369: 492\n2370: 492\n2371: 492\n2372: 492\n2373: 492\n2374: 492\n2375: 492\n2376: 492\n2377: 492\n2378: 492\n2379: 492\n2380: 492\n2381: 493\n2382: 494\n2383: 494\n2384: 495\n2385: 495\n2386: 496\n2387: 496\n2388: 496\n2389: 497\n2390: 497\n2391: 498\n2392: 498\n2393: 601\n2394: 498\n2395: 499\n2396: 499\n2397: 499\n2398: 499\n2399: 499\n2400: 499\n2401: 499\n2402: 499\n2403: 499\n2404: 499\n2405: 499\n2406: 501\n2407: 501\n2408: 501\n2409: 502\n2410: 502\n2411: 503\n2412: 503\n2413: 504\n2414: 504\n2415: 505\n2416: 505\n2417: 505\n2418: 505\n2419: 505\n2420: 506\n2421: 506\n2422: 506\n2423: 506\n2424: 506\n2425: 616\n2426: 507\n2427: 507\n2428: 523\n2429: 508\n2430: 508\n2431: 508\n2432: 510\n2433: 509\n2434: 509\n2435: 509\n2436: 509\n2437: 509\n2438: 509\n2439: 510\n2440: 511\n2441: 511\n2442: 511\n2443: 512\n2444: 512\n2445: 512\n2446: 512\n2447: 512\n2448: 512\n2449: 513\n2450: 513\n2451: 513\n2452: 513\n2453: 514\n2454: 514\n2455: 518\n2456: 515\n2457: 515\n2458: 515\n2459: 515\n2460: 515\n2461: 515\n2462: 515\n2463: 515\n2464: 515\n2465: 515\n2466: 515\n2467: 515\n2468: 515\n2469: 515\n2470: 515\n2471: 515\n2472: 515\n2473: 515\n2474: 515\n2475: 516\n2476: 516\n2477: 516\n2478: 517\n2479: 517\n2480: 517\n2481: 518\n2482: 518\n2483: 518\n2484: 597\n2485: 518\n2486: 518\n2487: 518\n2488: 518\n2489: 518\n2490: 
518\n2491: 518\n2492: 518\n2493: 518\n2494: 518\n2495: 518\n2496: 518\n2497: 518\n2498: 518\n2499: 518\n2500: 518\n2501: 518\n2502: 518\n2503: 518\n2504: 518\n2505: 518\n2506: 518\n2507: 518\n2508: 518\n2509: 518\n2510: 519\n2511: 520\n2512: 521\n2513: 521\n2514: 522\n2515: 522\n2516: 522\n2517: 522\n2518: 522\n2519: 572\n2520: 572\n2521: 523\n2522: 524\n2523: 525\n2524: 526\n2525: 528\n2526: 529\n2527: 530\n2528: 531\n2529: 531\n2530: 531\n2531: 532\n2532: 532\n2533: 533\n2534: 533\n2535: 533\n2536: 533\n2537: 535\n2538: 535\n2539: 537\n2540: 538\n2541: 538\n2542: 539\n2543: 539\n2544: 539\n2545: 539\n2546: 539\n2547: 539\n2548: 539\n2549: 541\n2550: 541\n2551: 541\n2552: 541\n2553: 542\n2554: 543\n2555: 544\n2556: 612\n2557: 545\n2558: 546\n2559: 546\n2560: 546\n2561: 546\n2562: 546\n2563: 546\n2564: 546\n2565: 546\n2566: 547\n2567: 547\n2568: 548\n2569: 548\n2570: 549\n2571: 549\n2572: 549\n2573: 549\n2574: 549\n2575: 549\n2576: 549\n2577: 549\n2578: 549\n2579: 549\n2580: 549\n2581: 549\n2582: 549\n2583: 549\n2584: 549\n2585: 549\n2586: 549\n2587: 549\n2588: 549\n2589: 549\n2590: 549\n2591: 549\n2592: 549\n2593: 549\n2594: 549\n2595: 549\n2596: 549\n2597: 550\n2598: 550\n2599: 550\n2600: 550\n2601: 550\n2602: 551\n2603: 551\n2604: 552\n2605: 553\n2606: 553\n2607: 553\n2608: 553\n2609: 553\n2610: 553\n2611: 554\n2612: 554\n2613: 554\n2614: 554\n2615: 554\n2616: 554\n2617: 554\n2618: 554\n2619: 554\n2620: 554\n2621: 554\n2622: 554\n2623: 554\n2624: 554\n2625: 554\n2626: 554\n2627: 554\n2628: 554\n2629: 554\n2630: 554\n2631: 554\n2632: 554\n2633: 554\n2634: 554\n2635: 554\n2636: 554\n2637: 554\n2638: 555\n2639: 555\n2640: 555\n2641: 555\n2642: 555\n2643: 555\n2644: 555\n2645: 555\n2646: 556\n2647: 557\n2648: 557\n2649: 557\n2650: 558\n2651: 558\n2652: 558\n2653: 558\n2654: 559\n2655: 559\n2656: 561\n2657: 561\n2658: 561\n2659: 561\n2660: 561\n2661: 561\n2662: 561\n2663: 561\n2664: 561\n2665: 561\n2666: 561\n2667: 561\n2668: 561\n2669: 561\n2670: 561\n2671: 
564\n2672: 564\n2673: 565\n2674: 565\n2675: 567\n2676: 568\n2677: 568\n2678: 569\n2679: 569\n2680: 569\n2681: 571\n2682: 572\n2683: 572\n2684: 572\n2685: 572\n2686: 572\n2687: 572\n2688: 572\n2689: 572\n2690: 572\n2691: 572\n2692: 572\n2693: 572\n2694: 572\n2695: 572\n2696: 572\n2697: 572\n2698: 572\n2699: 572\n2700: 572\n2701: 573\n2702: 573\n2703: 574\n2704: 575\n2705: 576\n2706: 576\n2707: 577\n2708: 578\n2709: 578\n2710: 578\n2711: 578\n2712: 578\n2713: 578\n2714: 578\n2715: 578\n2716: 579\n2717: 579\n2718: 579\n2719: 579\n2720: 579\n2721: 579\n2722: 580\n2723: 580\n2724: 580\n2725: 580\n2726: 580\n2727: 580\n2728: 580\n2729: 580\n2730: 580\n2731: 580\n2732: 580\n2733: 581\n2734: 582\n2735: 582\n2736: 582\n2737: 582\n2738: 583\n2739: 584\n2740: 584\n2741: 584\n2742: 585\n2743: 585\n2744: 586\n2745: 587\n2746: 587\n2747: 588\n2748: 588\n2749: 615\n2750: 589\n2751: 589\n2752: 590\n2753: 592\n2754: 593\n2755: 593\n2756: 596\n2757: 596\n2758: 597\n2759: 597\n2760: 597\n2761: 597\n2762: 598\n2763: 599\n2764: 599\n2765: 600\n2766: 608\n2767: 602\n2768: 602\n2769: 603\n2770: 605\n2771: 606\n2772: 606\n2773: 606\n2774: 606\n2775: 607\n2776: 608\n2777: 609\n2778: 610\n2779: 611\n2780: 611\n2781: 612\n2782: 612\n2783: 612\n2784: 613\n2785: 613\n2786: 614\n2787: 614\n2788: 615\n2789: 615\n2790: 615\n2791: 615\n2792: 615\n2793: 615\n2794: 616\n2795: 616\n2796: 616\n2797: 616\n2798: 616\n2799: 616\n2800: 617\n"
  },
  {
    "path": "spec/nmatrix_yale_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == nmatrix_yale_spec.rb\n#\n# Basic tests for NMatrix's Yale storage type.\n#\nrequire 'spec_helper'\nrequire \"./lib/nmatrix\"\n\ndescribe NMatrix do\n  context :yale do\n\n    it \"compares two empty matrices\" do\n      n = NMatrix.new(4, stype: :yale, dtype: :float64)\n      m = NMatrix.new(4, stype: :yale, dtype: :float64)\n      expect(n).to eq(m)\n    end\n\n    it \"compares two matrices following basic assignments\" do\n      n = NMatrix.new(2, stype: :yale, dtype: :float64)\n      m = NMatrix.new(2, stype: :yale, dtype: :float64)\n\n      m[0,0] = 1\n      m[0,1] = 1\n      expect(n).not_to eq(m)\n      n[0,0] = 1\n      expect(n).not_to eq(m)\n      n[0,1] = 1\n      expect(n).to eq(m)\n    end\n\n    it \"compares two matrices following elementwise operations\" do\n      n = NMatrix.new(2, stype: :yale, dtype: :float64)\n      m = NMatrix.new(2, stype: :yale, dtype: :float64)\n      n[0,1] = 1\n      m[0,1] = -1\n      x = n+m\n      expect(n+m).to eq(NMatrix.new(2, 0.0, stype: :yale))\n    end\n\n    it \"sets diagonal values\" do\n      n = NMatrix.new([2,3], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[1,1] = 0.1\n      n[0,0] = 0.2\n      expect(n.yale_d).to eq([0.2, 0.1])\n    end\n\n    it \"gets non-diagonal rows as hashes\" do\n      n 
= NMatrix.new([4,6], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[0,0] = 0.1\n      n[0,2] = 0.2\n      n[0,3] = 0.3\n      n[1,5] = 0.4\n      h = n.yale_nd_row(0, :hash)\n      expect(h).to eq({2 => 0.2, 3 => 0.3})\n    end\n\n    it \"gets non-diagonal occupied column indices for a given row\" do\n      n = NMatrix.new([4,6], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[0,0] = 0.1\n      n[0,2] = 0.2\n      n[0,3] = 0.3\n      n[1,5] = 0.4\n      a = n.yale_nd_row(0, :array)\n      expect(a).to eq([2,3])\n    end\n\n    it \"does not resize until necessary\" do\n      n = NMatrix.new([2,3], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      expect(n.yale_size).to eq(3)\n      expect(n.capacity).to eq(5)\n      n[0,0] = 0.1\n      n[0,1] = 0.2\n      n[1,0] = 0.3\n      expect(n.yale_size).to eq(5)\n      expect(n.capacity).to eq(5)\n    end\n\n\n    it \"sets when not resizing\" do\n      n = NMatrix.new([2,3], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[0,0] = 0.1\n      n[0,1] = 0.2\n      n[1,0] = 0.3\n      expect(n.yale_a).to eq([0.1, 0.0, 0.0, 0.2, 0.3])\n      expect(n.yale_ija).to eq([3,4,5,1,0])\n    end\n\n    it \"sets when resizing\" do\n      n = NMatrix.new([2,3], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[0,0] = 0.01\n      n[1,1] = 0.1\n      n[0,1] = 0.2\n      n[1,0] = 0.3\n      n[1,2] = 0.4\n      expect(n.yale_d).to eq([0.01, 0.1])\n      expect(n.yale_ia).to eq([3,4,6])\n      expect(n.yale_ja).to eq([1,0,2,nil])\n      expect(n.yale_lu).to eq([0.2, 0.3, 0.4, nil])\n    end\n\n    it \"resizes without erasing values\" do\n      require 'yaml'\n\n      associations = File.open('spec/nmatrix_yale_resize_test_associations.yaml') { |y| YAML::load(y) }\n\n      n = NMatrix.new([618,2801], stype: :yale, dtype: :byte, capacity: associations.size)\n      #n = NMatrix.new(:yale, 
[618, 2801], associations.size, :byte)\n\n      associations.each_pair do |j,i|\n        n[i,j] = 1\n        expect(n[i,j]).to be(1), \"Value at #{i},#{j} not inserted correctly!\"\n      end\n\n      associations.each_pair do |j,i|\n        expect(n[i,j]).to be(1), \"Value at #{i},#{j} erased during resize!\"\n      end\n    end\n\n    it \"sets values within rows\" do\n      n = NMatrix.new([3,20], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[2,1]   = 1.0\n      n[2,0]   = 1.5\n      n[2,15]  = 2.0\n      expect(n.yale_lu).to eq([1.5, 1.0, 2.0])\n      expect(n.yale_ja).to eq([0, 1, 15])\n    end\n\n    it \"gets values within rows\" do\n      n = NMatrix.new([3,20], stype: :yale, dtype: :float64)\n      n[2,1]   = 1.0\n      n[2,0]   = 1.5\n      n[2,15]  = 2.0\n      expect(n[2,1]).to eq(1.0)\n      expect(n[2,0]).to eq(1.5)\n      expect(n[2,15]).to eq(2.0)\n    end\n\n    it \"sets values within large rows\" do\n      n = NMatrix.new([10,300], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[5,1]   = 1.0\n      n[5,0]   = 1.5\n      n[5,15]  = 2.0\n      n[5,291] = 3.0\n      n[5,292] = 4.0\n      n[5,289] = 5.0\n      n[5,290] = 6.0\n      n[5,293] = 2.0\n      n[5,299] = 7.0\n      n[5,100] = 8.0\n      expect(n.yale_lu).to eq([1.5, 1.0, 2.0, 8.0, 5.0, 6.0, 3.0, 4.0, 2.0, 7.0])\n      expect(n.yale_ja).to eq([0,   1,   15,  100, 289, 290, 291, 292, 293, 299])\n    end\n\n    it \"gets values within large rows\" do\n      n = NMatrix.new([10,300], stype: :yale, dtype: :float64)\n      n.extend(NMatrix::YaleFunctions)\n      n[5,1]   = 1.0\n      n[5,0]   = 1.5\n      n[5,15]  = 2.0\n      n[5,291] = 3.0\n      n[5,292] = 4.0\n      n[5,289] = 5.0\n      n[5,290] = 6.0\n      n[5,293] = 2.0\n      n[5,299] = 7.0\n      n[5,100] = 8.0\n\n      n.yale_ja.each_index do |idx|\n        j = n.yale_ja[idx]\n        expect(n[5,j]).to eq(n.yale_lu[idx])\n      end\n    end\n\n    it \"dots two identical 
matrices\" do\n      a = NMatrix.new(4, stype: :yale, dtype: :float64)\n      a[0,1] = 4.0\n      a[1,2] = 1.0\n      a[1,3] = 1.0\n      a[3,1] = 2.0\n\n      b = a.dup\n      c = a.dot b\n\n      d = NMatrix.new(4, [0,0,4,4, 0,2,0,0, 0,0,0,0, 0,0,2,2], dtype: :float64, stype: :yale)\n\n      expect(c).to eq(d)\n    end\n\n    it \"dots two identical matrices where a positive and negative partial sum cancel on the diagonal\" do\n      a = NMatrix.new(4, 0.0, stype: :yale)\n\n      a[0,0] = 1.0\n      a[0,1] = 4.0\n      a[1,2] = 2.0\n      a[1,3] = -4.0\n      a[3,1] = 4.0\n      a[3,3] = 4.0\n\n      b = a.dup\n      c = a.dot b\n\n      c.extend(NMatrix::YaleFunctions)\n\n      expect(c.yale_ija.reject { |i| i.nil? }).to eq([5,8,9,9,11,1,2,3,3,1,2])\n      expect(c.yale_a.reject { |i| i.nil? }).to eq([1.0, -16.0, 0.0, 0.0, 0.0, 4.0, 8.0, -16.0, -16.0, 16.0, 8.0])\n\n    end\n\n    it \"dots two vectors\" do\n      n = NMatrix.new([16,1], 0, stype: :yale)\n      m = NMatrix.new([1,16], 0, stype: :yale)\n\n      n[0] = m[0] = 1\n      n[1] = m[1] = 2\n      n[2] = m[2] = 3\n      n[3] = m[3] = 4\n      n[4] = m[4] = 5\n      n[5] = m[5] = 6\n      n[6] = m[6] = 7\n      n[7] = m[7] = 8\n      n[8] = m[8] = 9\n      n[15] = m[15] = 16\n\n      nm = n.dot(m)\n\n      # Perform the same multiplication with dense\n      nmr = n.cast(:dense, :int64).dot(m.cast(:dense, :int64)).cast(:yale, :int64)\n\n      nm.extend(NMatrix::YaleFunctions)\n      nmr.extend(NMatrix::YaleFunctions)\n\n      # We want to do a structure comparison to ensure multiplication is occurring properly, but more importantly, to\n      # ensure that insertion sort is occurring as it should. If the row has more than four entries, it'll run quicksort\n      # instead. 
Quicksort calls insertion sort for small rows, so we test both with this particular multiplication.\n      expect(nm.yale_ija[0...107]).to eq(nmr.yale_ija[0...107])\n      expect(nm.yale_a[0...107]).to   eq(nmr.yale_a[0...107])\n\n      mn = m.dot(n)\n      expect(mn[0,0]).to eq(541)\n    end\n\n    it \"calculates the row key intersections of two matrices\" do\n      a = NMatrix.new([3,9], [0,1], stype: :yale, dtype: :byte, default: 0)\n      b = NMatrix.new([3,9], [0,0,1,0,1], stype: :yale, dtype: :byte, default: 0)\n      a.extend NMatrix::YaleFunctions\n      b.extend NMatrix::YaleFunctions\n\n      (0...3).each do |ai|\n        (0...3).each do |bi|\n          STDERR.puts (a.yale_ja_d_keys_at(ai) & b.yale_ja_d_keys_at(bi)).inspect\n          expect(a.yale_ja_d_keys_at(ai) & b.yale_ja_d_keys_at(bi)).to eq(a.yale_row_keys_intersection(ai, b, bi))\n        end\n      end\n\n    end\n  end\nend\n"
  },
  {
    "path": "spec/plugins/atlas/atlas_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == atlas_spec.rb\n#\n# Tests for interfaces that are only exposed by nmatrix-atlas\n#\n\nrequire 'spec_helper'\nrequire \"./lib/nmatrix/atlas\"\n\ndescribe \"NMatrix::LAPACK implementation from nmatrix-atlas plugin\" do\n  [:float32, :float64, :complex64, :complex128].each do |dtype|\n    context dtype do\n      it \"exposes clapack_getri\" do\n        a = NMatrix.new(:dense, 3, [1,0,4,1,1,6,-3,0,-10], dtype)\n        ipiv = NMatrix::LAPACK::clapack_getrf(:row, 3, 3, a, 3) # get pivot from getrf, use for getri\n\n        begin\n          NMatrix::LAPACK::clapack_getri(:row, 3, a, 3, ipiv)\n\n          b = NMatrix.new(:dense, 3, [-5,0,-2,-4,1,-1,1.5,0,0.5], dtype)\n          expect(a).to eq(b)\n        rescue NotImplementedError => e\n          pending e.to_s\n        end\n      end\n\n      # potrf decomposes a symmetric (or Hermitian)\n      # positive-definite matrix. 
The matrix tested below isn't symmetric.\n      # But this is okay since potrf just examines the upper/lower half\n      # (as requested) of the matrix and assumes that the rest is symmetric,\n      # so we just set the other part of the matrix to zero.\n      it \"exposes clapack_potrf upper\" do\n        pending \"potrf requires clapack\" unless NMatrix.has_clapack?\n\n        a = NMatrix.new(:dense, 3, [25,15,-5, 0,18,0, 0,0,11], dtype)\n        NMatrix::LAPACK::clapack_potrf(:row, :upper, 3, a, 3)\n        b = NMatrix.new(:dense, 3, [5,3,-1, 0,3,1, 0,0,3], dtype)\n        expect(a).to eq(b)\n      end\n\n      it \"exposes clapack_potrf lower\" do\n        pending \"potrf requires clapack\" unless NMatrix.has_clapack?\n\n        a = NMatrix.new(:dense, 3, [25,0,0, 15,18,0,-5,0,11], dtype)\n        NMatrix::LAPACK::clapack_potrf(:row, :lower, 3, a, 3)\n        b = NMatrix.new(:dense, 3, [5,0,0, 3,3,0, -1,1,3], dtype)\n        expect(a).to eq(b)\n      end\n\n      it \"exposes clapack_potri\" do\n        pending \"potri requires clapack\" unless NMatrix.has_clapack?\n\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        NMatrix::LAPACK::clapack_potrf(:row, :upper, 3, a, 3)\n        NMatrix::LAPACK::clapack_potri(:row, :upper, 3, a, 3)\n        b = NMatrix.new(3, [0.5, -0.5, 1,  0, 1.5, -2,  0, 0, 4], dtype: dtype)\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-14\n              end\n        expect(a).to be_within(err).of(b)\n      end\n\n      it \"exposes clapack_potrs\" do\n        pending \"potrs requires clapack\" unless NMatrix.has_clapack?\n\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        b = NMatrix.new([3,1], [3,0,2], dtype: dtype)\n\n        
NMatrix::LAPACK::clapack_potrf(:row, :upper, 3, a, 3)\n        NMatrix::LAPACK::clapack_potrs(:row, :upper, 3, 1, a, 3, b, 3)\n\n        x = NMatrix.new([3,1], [3.5, -5.5, 11], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(b).to be_within(err).of(x)\n      end\n    end\n  end\n\n  [:float32, :float64, :complex64, :complex128].each do |dtype|\n    context dtype do\n      it \"calculates the singular value decomposition with lapack_gesvd\" do\n        #example from Wikipedia\n        m = 4\n        n = 5\n        mn_min = [m,n].min\n        a = NMatrix.new([m,n],[1,0,0,0,2, 0,0,3,0,0, 0,0,0,0,0, 0,4,0,0,0], dtype: dtype)\n        s = NMatrix.new([mn_min], 0, dtype: a.abs_dtype) #s is always real and always returned as float/double, never as complex\n        u = NMatrix.new([m,m], 0, dtype: dtype)\n        vt = NMatrix.new([n,n], 0, dtype: dtype)\n\n        # This is a pure LAPACK function so it expects column-major functions\n        # So we need to transpose the input as well as the output\n        a = a.transpose\n        NMatrix::LAPACK.lapack_gesvd(:a, :a, m, n, a, m, s, u, m, vt, n, 500)\n        u = u.transpose\n        vt = vt.transpose\n\n        s_true = NMatrix.new([mn_min], [4,3,Math.sqrt(5),0], dtype: a.abs_dtype)\n        u_true = NMatrix.new([m,m], [0,0,1,0, 0,1,0,0, 0,0,0,-1, 1,0,0,0], dtype: dtype)\n        vt_true = NMatrix.new([n,n], [0,1,0,0,0, 0,0,1,0,0, Math.sqrt(0.2),0,0,0,Math.sqrt(0.8), 0,0,0,1,0, -Math.sqrt(0.8),0,0,0,Math.sqrt(0.2)], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(s).to be_within(err).of(s_true)\n        expect(u).to be_within(err).of(u_true)\n        expect(vt).to be_within(err).of(vt_true)\n   
   end\n\n      it \"calculates the singular value decomposition with lapack_gesdd\" do\n        #example from Wikipedia\n        m = 4\n        n = 5\n        mn_min = [m,n].min\n        a = NMatrix.new([m,n],[1,0,0,0,2, 0,0,3,0,0, 0,0,0,0,0, 0,4,0,0,0], dtype: dtype)\n        s = NMatrix.new([mn_min], 0, dtype: a.abs_dtype) #s is always real and always returned as float/double, never as complex\n        u = NMatrix.new([m,m], 0, dtype: dtype)\n        vt = NMatrix.new([n,n], 0, dtype: dtype)\n\n        # This is a pure LAPACK function so it expects column-major functions\n        # So we need to transpose the input as well as the output\n        a = a.transpose\n        NMatrix::LAPACK.lapack_gesdd(:a, m, n, a, m, s, u, m, vt, n, 500)\n        u = u.transpose\n        vt = vt.transpose\n\n        s_true = NMatrix.new([mn_min], [4,3,Math.sqrt(5),0], dtype: a.abs_dtype)\n        u_true = NMatrix.new([m,m], [0,0,1,0, 0,1,0,0, 0,0,0,-1, 1,0,0,0], dtype: dtype)\n        vt_true = NMatrix.new([n,n], [0,1,0,0,0, 0,0,1,0,0, Math.sqrt(0.2),0,0,0,Math.sqrt(0.8), 0,0,0,1,0, -Math.sqrt(0.8),0,0,0,Math.sqrt(0.2)], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(s).to be_within(err).of(s_true)\n        expect(u).to be_within(err).of(u_true)\n        expect(vt).to be_within(err).of(vt_true)\n      end\n\n      it \"exposes lapack_geev\" do\n        n = 3\n        a = NMatrix.new([n,n], [-1,0,0, 0,1,-2, 0,1,-1], dtype: dtype)\n        w = NMatrix.new([n], dtype: dtype)\n        if a.complex_dtype? 
#for real dtypes, imaginary parts of eigenvalues are stored in separate vector\n          wi = nil\n        else\n          wi = NMatrix.new([n], dtype: dtype)\n        end\n        vl = NMatrix.new([n,n], dtype: dtype)\n        vr = NMatrix.new([n,n], dtype: dtype)\n\n        # This is a pure LAPACK routine so it expects column-major matrices,\n        # so we need to transpose everything.\n        a = a.transpose\n        NMatrix::LAPACK::lapack_geev(:left, :right, n, a, n, w, wi, vl, n, vr, n, 2*n)\n        vr = vr.transpose\n        vl = vl.transpose\n\n        if !a.complex_dtype?\n          w = w + wi*Complex(0,1)\n        end\n\n        w_true = NMatrix.new([n], [Complex(0,1), -Complex(0,1), -1], dtype: NMatrix.upcast(dtype, :complex64))\n        if a.complex_dtype?\n          #For complex types the right/left eigenvectors are stored as columns\n          #of vr/vl.\n          vr_true = NMatrix.new([n,n],[0,0,1,\n                                       2/Math.sqrt(6),2/Math.sqrt(6),0,\n                                       Complex(1,-1)/Math.sqrt(6),Complex(1,1)/Math.sqrt(6),0], dtype: dtype)\n          vl_true = NMatrix.new([n,n],[0,0,1,\n                                       Complex(-1,1)/Math.sqrt(6),Complex(-1,-1)/Math.sqrt(6),0,\n                                       2/Math.sqrt(6),2/Math.sqrt(6),0], dtype: dtype)\n        else\n          #For real types, the real part of the first and second eigenvectors is\n          #stored in the first column, the imaginary part of the first (= the\n          #negative of the imaginary part of the second) eigenvector is stored\n          #in the second column, and the third eigenvector (purely real) is the\n          #third column.\n          vr_true = NMatrix.new([n,n],[0,0,1,\n                                       2/Math.sqrt(6),0,0,\n                                       1/Math.sqrt(6),-1/Math.sqrt(6),0], dtype: dtype)\n          vl_true = NMatrix.new([n,n],[0,0,1,\n                                       
-1/Math.sqrt(6),1/Math.sqrt(6),0,\n                                       2/Math.sqrt(6),0,0], dtype: dtype)\n        end\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(w).to be_within(err).of(w_true)\n        expect(vr).to be_within(err).of(vr_true)\n        expect(vl).to be_within(err).of(vl_true)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/plugins/fftw/fftw_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == fftw_spec.rb\n#\n# Tests for interfaces that are only exposed by nmatrix-fftw\n#\n\nrequire 'spec_helper'\nrequire \"./lib/nmatrix/fftw\"\n\ndescribe NMatrix do\n  context \"#fft\" do\n    before do\n      @answer = NMatrix.new([10],\n        [ \n          Complex(330.3200,0.0000)   , Complex(-8.4039  ,-150.3269),\n          Complex(-99.4807,-68.6579) , Complex(-143.6861, -20.4273),\n          Complex(67.6207  ,  8.5236), Complex(130.7800 ,  0.0000),\n          Complex(67.6207 ,  -8.5236), Complex(-143.6861, 20.4273),\n          Complex(-99.4807 , 68.6579), Complex(-8.4039  ,150.3269)\n        ], dtype: :complex128)      \n    end\n\n    it \"computes an FFT of a complex NMatrix\" do\n      nm = NMatrix.new([10],\n        [\n          Complex(9.32,0), Complex(44,0), Complex(125,0), Complex(34,0),\n          Complex(31,0),   Complex(44,0), Complex(12,0),  Complex(1,0),\n          Complex(53.23,0),Complex(-23.23,0)], dtype: :complex128)\n      expect(nm.fft.round(4)).to eq(@answer)\n    end\n  end\n\n  context \"#fft2\" do\n    it \"computes 2D FFT if NMatrix has such shape\" do\n      input = NMatrix.new([2,2],\n        [\n          Complex(9.3200,0), Complex(43.0000,0),\n          Complex(3.2000,0), Complex(4.0000,0)\n        ], dtype: :complex128\n      )\n      output = 
NMatrix.new([2,2],\n        [\n          Complex(59.520,0), Complex(-34.480,0),\n          Complex(45.120,0),  Complex(-32.880,0),\n        ], dtype: :complex128\n      )\n      expect(input.fft2.round(4)).to eq(output)   \n    end\n  end\nend\n\ndescribe NMatrix::FFTW, focus: true do\n  describe NMatrix::FFTW::Plan do\n    context \".new\" do\n      it \"creates a new plan for default DFT (complex input/complex output)\" do\n        plan = NMatrix::FFTW::Plan.new(4)\n        # TODO: Figure a way to test internal C data structures.\n\n        expect(plan.shape)    .to eq([4])\n        expect(plan.size)     .to eq(4)\n        expect(plan.dim)      .to eq(1)\n        expect(plan.flags)     .to eq([:estimate])\n        expect(plan.direction).to eq(:forward)\n      end\n\n      it \"creates a new plan for multi dimensional DFT with options\" do\n        plan = NMatrix::FFTW::Plan.new([10,5,8],\n          direction: :backward, flags: [:exhaustive, :estimate], dim: 3)\n\n        expect(plan.shape)    .to eq([10,5,8])\n        expect(plan.size)     .to eq(10*5*8)\n        expect(plan.dim)      .to eq(3)\n        expect(plan.flags)    .to eq([:exhaustive, :estimate])\n        expect(plan.direction).to eq(:backward)\n      end\n\n      it \"creates a new plan for real input/complex output\" do\n        plan = NMatrix::FFTW::Plan.new([5,20,10,4,2],\n          direction: :forward, flags: [:patient, :exhaustive], dim: 5, \n          type: :real_complex)\n\n        expect(plan.shape) .to eq([5,20,10,4,2])\n        expect(plan.size)  .to eq(5*20*10*4*2)\n        expect(plan.dim)   .to eq(5)\n        expect(plan.flags) .to eq([:patient, :exhaustive])\n        expect(plan.type)  .to eq(:real_complex)\n      end\n\n      it \"raises error for plan with incompatible shape and dimension\" do\n        expect {\n          NMatrix::FFTW::Plan.new([9], dim: 2, type: :real_complex)\n        }.to raise_error(ArgumentError)\n      end\n\n      it \"creates a new plan for real input/real 
output\" do\n        plan = NMatrix::FFTW::Plan.new([30,30], type: :real_real, \n          real_real_kind: [:rodft00, :redft10], dim: 2)\n\n        expect(plan.shape).to eq([30,30])\n        expect(plan.size) .to eq(30*30)\n        expect(plan.dim)  .to eq(2)\n        expect(plan.flags).to eq([:estimate])\n        expect(plan.type) .to eq(:real_real)\n      end\n\n      it \"creates a new plan for complex input/real output\" do\n        plan = NMatrix::FFTW::Plan.new([30,400], type: :complex_real, \n          dim: 2, flags: [:patient, :exhaustive])\n\n        expect(plan.shape).to eq([30,400])\n        expect(plan.size) .to eq(30*400)\n        expect(plan.dim)  .to eq(2)\n        expect(plan.flags).to eq([:patient, :exhaustive])\n        expect(plan.type) .to eq(:complex_real)\n      end\n    end\n\n    context \"#set_input\" do\n      it \"accepts nothing but complex128 input for the default or complex_real plan\" do\n        plan  = NMatrix::FFTW::Plan.new(4)\n        input = NMatrix.new([4], [23.54,52.34,52.345,64], dtype: :float64)\n        expect {\n          plan.set_input(input)\n        }.to raise_error(ArgumentError)\n\n        plan = NMatrix::FFTW::Plan.new(4, type: :complex_real)\n        expect {\n          plan.set_input input\n        }.to raise_error(ArgumentError)\n      end\n\n      it \"accepts nothing but float64 input for real_complex or real_real plan\" do\n        plan = NMatrix::FFTW::Plan.new(4, type: :real_complex)\n        input = NMatrix.new([4], [1,2,3,4], dtype: :int32)\n\n        expect {\n          plan.set_input(input)\n        }.to raise_error(ArgumentError)\n      end\n    end\n\n    context \"#execute\" do\n      it \"calculates a basic 1D DFT\" do\n        input = NMatrix.new([10],\n          [\n            Complex(9.32,0),\n            Complex(44,0),\n            Complex(125,0),\n            Complex(34,0),\n            Complex(31,0),\n            Complex(44,0),\n            Complex(12,0),\n            Complex(1,0),\n            
Complex(53.23,0),\n            Complex(-23.23,0),\n          ], dtype: :complex128)\n\n        output = NMatrix.new([10],\n          [\n            Complex(330.3200,0.0000),\n            Complex(-8.4039  ,-150.3269),\n            Complex(-99.4807 , -68.6579),\n            Complex(-143.6861, -20.4273),\n            Complex(67.6207  ,  8.5236),\n            Complex(130.7800 ,  0.0000),\n            Complex(67.6207  ,  -8.5236),\n            Complex(-143.6861, 20.4273),\n            Complex(-99.4807 , 68.6579),\n            Complex(-8.4039  ,150.3269)\n          ], dtype: :complex128)\n\n        plan = NMatrix::FFTW::Plan.new(10)\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output.round(4)).to eq(output)\n      end\n\n      it \"calculates 2D DFT with options\" do\n        input = NMatrix.new([2,2],\n          [\n            Complex(9.3200,0), Complex(43.0000,0),\n            Complex(3.2000,0), Complex(4.0000,0)\n          ], dtype: :complex128\n        )\n\n        output = NMatrix.new([2,2],\n          [\n            Complex(59.520,0), Complex(-34.480,0),\n            Complex(45.120,0),  Complex(-32.880,0),\n          ], dtype: :complex128\n        )\n\n        plan = NMatrix::FFTW::Plan.new([2,2],\n          direction: :forward, flags: :estimate, dim: 2)\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output).to eq(output)\n      end\n\n      it \"calculates ND DFT with options\" do\n\n      end\n\n      it \"calculates 1D real input/complex output DFT\" do\n        input  = NMatrix.new([4], [3.10, 1.73, 1.04, 2.83], dtype: :float64)\n        output = NMatrix.new([3], \n          [Complex(8.70, 0), Complex(2.06, 1.1), Complex(-0.42, 0)], dtype: :complex128)\n        plan = NMatrix::FFTW::Plan.new([4], type: :real_complex)\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output).to eq(output)\n      end\n\n      it \"calculates 2D 
real input/complex output DFT\" do\n        input = NMatrix.new([16], [\n          1  ,   5,54    ,656,\n          4.3,1.32,-43.34,14 ,\n          1  ,   5,    54,656,\n          4.3,1.32,-43.34,14\n          ], dtype: :float64) \n        output = NMatrix.new([9],\n          [\n            Complex(1384.56, 0.0),\n            Complex(-10.719999999999999, 1327.36),\n            Complex(-1320.72, 0.0),\n            Complex(0.0, 0.0),\n            Complex(0.0, 0.0),\n            Complex(0.0, 0.0),\n            Complex(1479.44, 0.0),\n            Complex(-201.28, 1276.64),\n            Complex(-1103.28, 0.0)\n          ], dtype: :complex128\n        )\n\n        plan = NMatrix::FFTW::Plan.new([4,4], type: :real_complex, dim: 2)\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output).to eq(output)\n      end\n\n      it \"calculates 1D complex input/real output DFT\" do\n        input = NMatrix.new([8],\n          [\n            Complex(9.32,0),\n            Complex(43.0,0),\n            Complex(3.20,0),\n            Complex(4.00,0),\n            Complex(5.32,0),\n            Complex(3.20,0),\n            Complex(4.00,0),\n            Complex(5.32,0)\n          ], dtype: :complex128)\n\n        output = NMatrix.new([8], [\n            115.04,59.1543,8.24,-51.1543,-72.96,-51.1543,8.24,59.1543\n          ], dtype: :float64)\n\n        plan = NMatrix::FFTW::Plan.new([8], type: :complex_real)\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output.round(4)).to eq(output)\n      end\n\n      it \"calculates 2D complex input/real output DFT\" do\n        input = NMatrix.new([9],\n          [\n            Complex(9.32,0),\n            Complex(43.0,0),\n            Complex(3.20,0),\n            Complex(4.00,0),\n            Complex(5.32,0),\n            Complex(3.20,0),\n            Complex(4.00,0),\n            Complex(5.32,0),\n            Complex(45.32,0)\n          ], dtype: 
:complex128)\n        output = NMatrix.new([9], [\n            118.24,-32.36,-32.36,83.86,-35.54,-33.14,83.86,-33.14,-35.54\n          ], dtype: :float64)\n\n        plan = NMatrix::FFTW::Plan.new([3,3], type: :complex_real, dim: 2)\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output.round(2)) .to eq(output)\n      end\n\n      it \"calculates basic 1D real input/real output DFT of kind RODFT00\" do\n        input = NMatrix.new([9],\n          [9.32,43.00,3.20,4.00,5.32,3.20,4.00,5.32,45.32], dtype: :float64)\n        output = NMatrix.new([9],\n          [126.56,28.77,165.67,-24.76,105.52,-110.31,-1.23,-116.45,-14.44],\n          dtype: :float64)\n        plan = NMatrix::FFTW::Plan.new([9], type: :real_real, real_real_kind: [:rodft00])\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output.round(2)).to eq(output)\n      end\n\n      it \"calculates basic 1D real input/real output DFT of kind REDFT10\" do\n        input = NMatrix.new([9],\n          [9.32,43.00,3.20,4.00,5.32,3.20,4.00,5.32,45.32], dtype: :float64)\n        output = NMatrix.new([9],\n          [245.36,-6.12,126.84,-62.35,35.00,-109.42,-38.24,-92.49,-21.20], \n          dtype: :float64)\n\n        plan = NMatrix::FFTW::Plan.new([9], type: :real_real, real_real_kind: [:redft10])\n        plan.set_input input\n        expect(plan.execute).to eq(true)\n        expect(plan.output.round(2)).to eq(output)\n      end\n\n      it \"calculates 2D DFT for real input/real output of kind REDFT10, REDFT11\" do\n        input = NMatrix.new([9],\n          [9.32,43.00,3.20,4.00,5.32,3.20,4.00,5.32,45.32], dtype: :float64)\n        output = NMatrix.new([9],\n          [272.181,-249.015,66.045,72.334,23.907,-228.463,85.368,-105.331,30.836],\n          dtype: :float64)\n\n        plan = NMatrix::FFTW::Plan.new([3,3], type: :real_real, \n          real_real_kind: [:redft10, :redft11], dim: 2)\n        plan.set_input input\n     
   expect(plan.execute).to eq(true)\n        expect(plan.output.round(3)) .to eq(output)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/plugins/lapacke/lapacke_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == lapacke_spec.rb\n#\n# Tests for interfaces that are only exposed by nmatrix-lapacke\n#\n\nrequire 'spec_helper'\nrequire \"./lib/nmatrix/lapacke\"\n\ndescribe \"NMatrix::LAPACK functions implemented with LAPACKE interface\" do\n  [:float32, :float64, :complex64, :complex128].each do |dtype|\n    context dtype do\n      it \"exposes lapacke_getrf\" do\n        a = NMatrix.new([3,4], GETRF_EXAMPLE_ARRAY, dtype: dtype)\n        ipiv = NMatrix::LAPACK.lapacke_getrf(:row, 3, 4, a, 4)\n        b = NMatrix.new([3,4], GETRF_SOLUTION_ARRAY, dtype: dtype)\n        ipiv_true = [2,3,3]\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(a).to be_within(err).of(b)\n        expect(ipiv).to eq(ipiv_true)\n      end\n\n      it \"exposes lapacke_getri\" do\n        a = NMatrix.new(:dense, 3, [1,0,4,1,1,6,-3,0,-10], dtype)\n        ipiv = NMatrix::LAPACK::lapacke_getrf(:row, 3, 3, a, 3) # get pivot from getrf, use for getri\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n         
         1e-14\n              end\n\n        NMatrix::LAPACK::lapacke_getri(:row, 3, a, 3, ipiv)\n\n        b = NMatrix.new(:dense, 3, [-5,0,-2,-4,1,-1,1.5,0,0.5], dtype)\n        expect(a).to be_within(err).of(b)\n      end\n\n      it \"exposes lapacke_getrs with vector solutions\" do\n        a     = NMatrix.new(3, [-2,4,-3,3,-2,1,0,-4,3], dtype: dtype)\n        ipiv  = NMatrix::LAPACK::lapacke_getrf(:row, 3, 3, a, 3)\n        b     = NMatrix.new([3,1], [-1, 17, -9], dtype: dtype)\n\n        #be careful! the leading dimension (lda,ldb) is the number of rows for row-major in LAPACKE. Different from CLAPACK convention!\n        NMatrix::LAPACK::lapacke_getrs(:row, false, 3, 1, a, 3, ipiv, b, 1)\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-13\n              end\n\n        expect(b[0]).to be_within(err).of(5)\n        expect(b[1]).to be_within(err).of(-15.0/2)\n        expect(b[2]).to be_within(err).of(-13)\n      end\n\n      it \"exposes lapacke_getrs with matrix solutions\" do\n        a     = NMatrix.new(3, [-2,4,-3,3,-2,1,0,-4,3], dtype: dtype)\n        ipiv  = NMatrix::LAPACK::lapacke_getrf(:row, 3, 3, a, 3)\n        b     = NMatrix.new([3,2], [-1, 2, 17, 10, -9, 1], dtype: dtype)\n\n        #be careful! the leading dimension (lda,ldb) is the number of rows for row-major in LAPACKE. 
Different from CLAPACK convention!\n        NMatrix::LAPACK::lapacke_getrs(:row, false, 3, 2, a, 3, ipiv, b, 2)\n\n        # delta varies for different dtypes\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-13\n              end\n\n        x = NMatrix.new([3,2], [5, -1.5, -7.5, -21.25, -13, -28], dtype: dtype)\n        expect(b).to be_within(err).of(x)\n      end\n\n      it \"exposes lapacke_potrf\" do\n        # first do upper\n        begin\n          a = NMatrix.new(:dense, 3, [25,15,-5, 0,18,0, 0,0,11], dtype)\n          NMatrix::LAPACK::lapacke_potrf(:row, :upper, 3, a, 3)\n          b = NMatrix.new(:dense, 3, [5,3,-1, 0,3,1, 0,0,3], dtype)\n          expect(a).to eq(b)\n        end\n\n        # then do lower\n        a = NMatrix.new(:dense, 3, [25,0,0, 15,18,0,-5,0,11], dtype)\n        NMatrix::LAPACK::lapacke_potrf(:row, :lower, 3, a, 3)\n        b = NMatrix.new(:dense, 3, [5,0,0, 3,3,0, -1,1,3], dtype)\n        expect(a).to eq(b)\n      end\n\n      it \"exposes lapacke_potri\" do\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        NMatrix::LAPACK::lapacke_potrf(:row, :upper, 3, a, 3)\n        NMatrix::LAPACK::lapacke_potri(:row, :upper, 3, a, 3)\n        b = NMatrix.new(3, [0.5, -0.5, 1,  0, 1.5, -2,  0, 0, 4], dtype: dtype)\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-14\n              end\n        expect(a).to be_within(err).of(b)\n      end\n\n      it \"exposes lapacke_potrs with vector solution\" do\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        b = NMatrix.new([3,1], [3,0,2], dtype: dtype)\n\n        NMatrix::LAPACK::lapacke_potrf(:row, :upper, 3, a, 3)\n 
       #ldb is different from CLAPACK versions\n        NMatrix::LAPACK::lapacke_potrs(:row, :upper, 3, 1, a, 3, b, 1)\n\n        x = NMatrix.new([3,1], [3.5, -5.5, 11], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(b).to be_within(err).of(x)\n      end\n\n      it \"exposes lapacke_potrs with matrix solution\" do\n        a = NMatrix.new(3, [4, 0,-1,\n                            0, 2, 1,\n                            0, 0, 1], dtype: dtype)\n        b = NMatrix.new([3,2], [3,4,\n                                0,4,\n                                2,0], dtype: dtype)\n\n        NMatrix::LAPACK::lapacke_potrf(:row, :upper, 3, a, 3)\n        #ldb is different from CLAPACK versions\n        NMatrix::LAPACK::lapacke_potrs(:row, :upper, 3, 2, a, 3, b, 2)\n\n        x = NMatrix.new([3,2], [3.5, 0,\n                                -5.5, 4,\n                                11, -4], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(b).to be_within(err).of(x)\n      end\n\n      it \"calculates the singular value decomposition with lapacke_gesvd\" do\n        #example from Wikipedia\n        m = 4\n        n = 5\n        mn_min = [m,n].min\n        a = NMatrix.new([m,n],[1,0,0,0,2, 0,0,3,0,0, 0,0,0,0,0, 0,4,0,0,0], dtype: dtype)\n        s = NMatrix.new([mn_min], 0, dtype: a.abs_dtype) #s is always real and always returned as float/double, never as complex\n        u = NMatrix.new([m,m], 0, dtype: dtype)\n        vt = NMatrix.new([n,n], 0, dtype: dtype)\n        superb = NMatrix.new([mn_min-1], dtype: a.abs_dtype)\n\n        NMatrix::LAPACK.lapacke_gesvd(:row, :a, :a, m, n, a, n, s, u, m, vt, n, superb)\n\n        s_true = NMatrix.new([mn_min], 
[4,3,Math.sqrt(5),0], dtype: a.abs_dtype)\n        u_true = NMatrix.new([m,m], [0,0,1,0, 0,1,0,0, 0,0,0,-1, 1,0,0,0], dtype: dtype)\n        vt_true = NMatrix.new([n,n], [0,1,0,0,0, 0,0,1,0,0, Math.sqrt(0.2),0,0,0,Math.sqrt(0.8), 0,0,0,1,0, -Math.sqrt(0.8),0,0,0,Math.sqrt(0.2)], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(s).to be_within(err).of(s_true)\n        expect(u).to be_within(err).of(u_true)\n        expect(vt).to be_within(err).of(vt_true)\n      end\n\n      it \"calculates the singular value decomposition with lapacke_gesdd\" do\n        #example from Wikipedia\n        m = 4\n        n = 5\n        mn_min = [m,n].min\n        a = NMatrix.new([m,n],[1,0,0,0,2, 0,0,3,0,0, 0,0,0,0,0, 0,4,0,0,0], dtype: dtype)\n        s = NMatrix.new([mn_min], 0, dtype: a.abs_dtype) #s is always real and always returned as float/double, never as complex\n        u = NMatrix.new([m,m], 0, dtype: dtype)\n        vt = NMatrix.new([n,n], 0, dtype: dtype)\n\n        NMatrix::LAPACK.lapacke_gesdd(:row, :a, m, n, a, n, s, u, m, vt, n)\n\n        s_true = NMatrix.new([mn_min], [4,3,Math.sqrt(5),0], dtype: a.abs_dtype)\n        u_true = NMatrix.new([m,m], [0,0,1,0, 0,1,0,0, 0,0,0,-1, 1,0,0,0], dtype: dtype)\n        vt_true = NMatrix.new([n,n], [0,1,0,0,0, 0,0,1,0,0, Math.sqrt(0.2),0,0,0,Math.sqrt(0.8), 0,0,0,1,0, -Math.sqrt(0.8),0,0,0,Math.sqrt(0.2)], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-5\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(s).to be_within(err).of(s_true)\n        expect(u).to be_within(err).of(u_true)\n        expect(vt).to be_within(err).of(vt_true)\n      end\n\n      it \"calculates eigenvalues and eigenvectors using lapacke_geev\" do\n        n = 3\n        a = 
NMatrix.new([n,n], [-1,0,0, 0,1,-2, 0,1,-1], dtype: dtype)\n        w = NMatrix.new([n], dtype: dtype)\n        if a.complex_dtype? #for real dtypes, imaginary parts of eigenvalues are stored in separate vector\n          wi = nil\n        else\n          wi = NMatrix.new([n], dtype: dtype)\n        end\n        vl = NMatrix.new([n,n], dtype: dtype)\n        vr = NMatrix.new([n,n], dtype: dtype)\n\n        NMatrix::LAPACK.lapacke_geev(:row, :t, :t, n, a, n, w, wi, vl, n, vr, n)\n\n        if !a.complex_dtype?\n          w = w + wi*Complex(0,1)\n        end\n\n        w_true = NMatrix.new([n], [Complex(0,1), -Complex(0,1), -1], dtype: NMatrix.upcast(dtype, :complex64))\n        if a.complex_dtype?\n          #For complex types the right/left eigenvectors are stored as columns\n          #of vr/vl.\n          vr_true = NMatrix.new([n,n],[0,0,1,\n                                       2/Math.sqrt(6),2/Math.sqrt(6),0,\n                                       Complex(1,-1)/Math.sqrt(6),Complex(1,1)/Math.sqrt(6),0], dtype: dtype)\n          vl_true = NMatrix.new([n,n],[0,0,1,\n                                       Complex(-1,1)/Math.sqrt(6),Complex(-1,-1)/Math.sqrt(6),0,\n                                       2/Math.sqrt(6),2/Math.sqrt(6),0], dtype: dtype)\n        else\n          #For real types, the real part of the first and second eigenvectors is\n          #stored in the first column, the imaginary part of the first (= the\n          #negative of the imaginary part of the second) eigenvector is stored\n          #in the second column, and the third eigenvector (purely real) is the\n          #third column.\n          vr_true = NMatrix.new([n,n],[0,0,1,\n                                       2/Math.sqrt(6),0,0,\n                                       1/Math.sqrt(6),-1/Math.sqrt(6),0], dtype: dtype)\n          vl_true = NMatrix.new([n,n],[0,0,1,\n                                       -1/Math.sqrt(6),1/Math.sqrt(6),0,\n                                       
2/Math.sqrt(6),0,0], dtype: dtype)\n        end\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-6\n                when :float64, :complex128\n                  1e-15\n              end\n\n        expect(w).to be_within(err).of(w_true)\n        expect(vr).to be_within(err).of(vr_true)\n        expect(vl).to be_within(err).of(vl_true)\n      end\n      \n      it \"exposes lapacke_geqrf\" do\n        a = NMatrix.new(3, [12.0, -51.0,   4.0, \n                             6.0, 167.0, -68.0, \n                            -4.0,  24.0, -41.0] , dtype: dtype)\n\n        b = NMatrix.new([3,1], 0, dtype: dtype)\n\n        NMatrix::LAPACK::lapacke_geqrf(:row, a.shape[0], a.shape[1], a, a.shape[1], b)\n\n        x = NMatrix.new([3,1], TAU_SOLUTION_ARRAY, dtype: dtype)\n     \n        y = NMatrix.new([3,3], GEQRF_SOLUTION_ARRAY, dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n        \n        expect(b).to be_within(err).of(x)\n        expect(a).to be_within(err).of(y)      \n      end\n\n      it \"calculates QR decomposition in a compressed format using geqrf!\" do\n        a = NMatrix.new(3, [12.0, -51.0,   4.0, \n                             6.0, 167.0, -68.0, \n                            -4.0,  24.0, -41.0] , dtype: dtype)\n\n        tau = a.geqrf!\n    \n        x = NMatrix.new([3,1], TAU_SOLUTION_ARRAY, dtype: dtype)\n     \n        y = NMatrix.new([3,3], GEQRF_SOLUTION_ARRAY, dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n        \n        expect(tau).to be_within(err).of(x)\n        expect(a).to be_within(err).of(y)      \n      end\n\n      it \"exposes lapacke_ormqr and lapacke_unmqr\" do\n        a = NMatrix.new([4,2], 
[34.0,  21.0, \n                                23.0,  53.0, \n                                26.0, 346.0, \n                                23.0, 121.0] , dtype: dtype)\n\n        tau = NMatrix.new([2,1], dtype: dtype)\n        result = NMatrix.identity(4, dtype: dtype)\n        \n        # get tau from geqrf, use for ormqr  \n        NMatrix::LAPACK::lapacke_geqrf(:row, a.shape[0], a.shape[1], a, a.shape[1], tau)\n\n        #Q is stored in result \n        a.complex_dtype? ?\n          NMatrix::LAPACK::lapacke_unmqr(:row, :left, false, result.shape[0], result.shape[1], tau.shape[0], \n                                                                a, a.shape[1], tau, result, result.shape[1])\n          :\n\n          NMatrix::LAPACK::lapacke_ormqr(:row, :left, false, result.shape[0], result.shape[1], tau.shape[0], \n                                                                a, a.shape[1], tau, result, result.shape[1])\n\n        x = NMatrix.new([4,4], Q_SOLUTION_ARRAY_1, dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(result).to be_within(err).of(x)      \n      end\n\n      it \"calculates the product of the orthogonal matrix with an arbitrary matrix\" do\n        a = N.new([2,2], [34.0, 21, 23, 53] , dtype: dtype)\n\n        tau = NMatrix.new([2,1], dtype: dtype)\n        \n        #Result is the multiplicand that gets overridden : result = Q * result\n        result   = NMatrix.new([2,2], [2,0,0,2], dtype: dtype)\n        \n        # get tau from geqrf, use for ormqr  \n        NMatrix::LAPACK::lapacke_geqrf(:row, a.shape[0], a.shape[1], a, a.shape[1], tau)\n\n        #Q is stored in result \n        a.complex_dtype? 
?\n          NMatrix::LAPACK::lapacke_unmqr(:row, :left, false, result.shape[0], result.shape[1], tau.shape[0], \n                                                                a, a.shape[1], tau, result, result.shape[1])\n          :\n\n          NMatrix::LAPACK::lapacke_ormqr(:row, :left, false, result.shape[0], result.shape[1], tau.shape[0], \n                                                                a, a.shape[1], tau, result, result.shape[1])\n\n        x = NMatrix.new([2,2], [-1.6565668262559257 , -1.1206187354084205, \n                                -1.1206187354084205 , 1.6565668262559263], dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(result).to be_within(err).of(x)      \n      end\n      \n      it \"calculates the orthogonal matrix Q using ormqr/unmqr after geqrf!\" do\n        a = NMatrix.new([4,2], [34.0,  21.0, \n                                23.0,  53.0, \n                                26.0, 346.0, \n                                23.0, 121.0] , dtype: dtype)\n        \n        # get tau from geqrf, use for ormqr  \n        tau = a.geqrf!\n\n        #Q is stored in result \n        result = a.complex_dtype? ? 
a.unmqr(tau) : a.ormqr(tau)\n          \n\n        x = NMatrix.new([4,4], Q_SOLUTION_ARRAY_1, dtype: dtype)\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(result).to be_within(err).of(x)      \n      end\n    end\n    \n    it \"calculates the transpose of Q using ormqr/unmqr after geqrf!\" do\n        a = NMatrix.new([4,2], [34.0,  21.0, \n                                23.0,  53.0, \n                                26.0, 346.0, \n                                23.0, 121.0] , dtype: dtype)\n        \n        # get tau from geqrf, use for ormqr  \n        tau = a.geqrf!\n\n        #Q is stored in result \n        result = a.complex_dtype? ? a.unmqr(tau, :left, :complex_conjugate) : a.ormqr(tau, :left, :transpose)\n          \n\n        x = NMatrix.new([4,4], Q_SOLUTION_ARRAY_1, dtype: dtype)\n        x = x.transpose\n\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(result).to be_within(err).of(x)      \n    end\n\n    it \"calculates the multiplication c * Q using ormqr/unmqr after geqrf!\" do\n        a = NMatrix.new(3, [12.0, -51.0,   4.0, \n                             6.0, 167.0, -68.0, \n                            -4.0,  24.0, -41.0] , dtype: dtype)\n        \n        # get tau from geqrf, use for ormqr  \n        tau = a.geqrf!\n        c = NMatrix.new([2,3], [1,0,1,0,0,1], dtype: dtype)\n\n        #Q is stored in result \n        result = a.complex_dtype? ? 
a.unmqr(tau, :right, false, c) : a.ormqr(tau, :right, false, c)\n          \n        solution = NMatrix.new([2,3], [-0.5714285714285714,   0.2228571428571429, 1.2742857142857142,\n                                        0.28571428571428575, -0.1714285714285714, 0.9428571428571428] , dtype: dtype)\n        err = case dtype\n                when :float32, :complex64\n                  1e-4\n                when :float64, :complex128\n                  1e-14\n              end\n\n        expect(result).to be_within(err).of(solution)      \n    end\n  end\nend\n"
  },
  {
    "path": "spec/rspec_monkeys.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == rspec_monkeys.rb\n#\n# A set of monkey patches for RSpec allowing checks of NMatrix types\n#\n\nmodule RSpec::Matchers::BuiltIn\n  class BeWithin\n\n    def of(expected)\n      @expected = expected\n      @unit     = ''\n      if expected.is_a?(NMatrix)\n        @tolerance = if @delta.is_a?(NMatrix)\n                       @delta.clone\n                     elsif @delta.is_a?(Array)\n                       NMatrix.new(:dense, expected.shape, @delta, expected.dtype)\n                     else\n                       NMatrix.ones_like(expected) * @delta\n                     end\n      else\n        @tolerance = @delta\n      end\n\n      self\n    end\n\n    def percent_of(expected)\n      @expected  = expected\n      @unit      = '%'\n      @tolerance = @expected.abs * @delta / 100.0 # <- only change is to reverse abs and @delta\n      self\n    end\n  end\nend"
  },
  {
    "path": "spec/rspec_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == rspec_spec.rb\n#\n# A spec for testing monkey patches to RSpec for NMatrix.\n#\nrequire 'spec_helper'\n\ndescribe \"RSpec\" do\n  it \"should permit #be_within to be used on a dense NMatrix\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    expect(NMatrix.new([4,1], 1.0, dtype: :complex128, stype: :dense) / 10000.0).to be_within(0.00000001).of(NMatrix.new([4,1], 0.0001, dtype: :float64, stype: :dense))\n    expect(NMatrix.new([4,1], 1.0, dtype: :complex128, stype: :dense) / 10000.0).not_to be_within(0.00000001).of(NMatrix.new([4,1], 1.0, dtype: :float64, stype: :dense))\n  end\nend\n"
  },
  {
    "path": "spec/shortcuts_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == shortcuts_spec.rb\n#\n# Specs for the shortcuts used in NMatrix and in NVector.\n#\n\nrequire 'spec_helper'\nrequire 'pry'\n\ndescribe NMatrix do\n  it \"zeros() creates a matrix of zeros\" do\n    m = NMatrix.zeros(3)\n    n = NMatrix.new([3, 3], 0)\n\n    expect(m).to eq n\n  end\n\n  it \"ones() creates a matrix of ones\" do\n    m = NMatrix.ones(3)\n    n = NMatrix.new([3, 3], 1)\n\n    expect(m).to eq n\n  end\n\n  it \"eye() creates an identity matrix\" do\n    m = NMatrix.eye(3)\n    identity3 = NMatrix.new([3, 3], [1, 0, 0, 0, 1, 0, 0, 0, 1])\n\n    expect(m).to eq identity3\n  end\n\n  it \"hilbert() creates an hilbert matrix\" do\n    m = NMatrix.hilbert(8)\n    expect(m[4, 0]).to be_within(0.000001).of(0.2)\n    expect(m[4, 1]).to be_within(0.000001).of(0.16666666666666666)\n    expect(m[4, 2]).to be_within(0.000001).of(0.14285714285714285)\n    expect(m[4, 3]).to be_within(0.000001).of(0.125)\n\n    m = NMatrix.hilbert(3)\n    hilbert3 = NMatrix.new([3, 3], [1.0, 0.5, 0.3333333333333333,\\\n     0.5, 0.3333333333333333, 0.25, 0.3333333333333333, 0.25, 0.2])\n    expect(m).to eq hilbert3\n    0.upto(2) do |i|\n      0.upto(2) do |j|\n        expect(m[i, j]).to be_within(0.000001).of(hilbert3[i,j])\n      end\n    end\n  end\n\n  it \"inv_hilbert() creates an inverse hilbert 
matrix\" do\n    m = NMatrix.inv_hilbert(6)\n    inv_hilbert6 = [3360.0,  -88200.0,   564480.0, -1411200.0]\n    expect(m[2,0]).to be_within(0.000001).of(inv_hilbert6[0])\n    expect(m[2,1]).to be_within(0.000001).of(inv_hilbert6[1])\n    expect(m[2,2]).to be_within(0.000001).of(inv_hilbert6[2])\n    expect(m[2,3]).to be_within(0.000001).of(inv_hilbert6[3])\n\n    m = NMatrix.inv_hilbert(3)\n    inv_hilbert3 = NMatrix.new([3, 3], [  9.0,  -36.0,   30.0, -36.0,  192.0, -180.0, 30.0, -180.0,  180.0] )\n    0.upto(2) do |i|\n      0.upto(2) do |j|\n        expect(m[i, j]).to be_within(0.000001).of(inv_hilbert3[i,j])\n      end\n    end\n  end\n\n  it \"diag() creates a matrix with pre-supplied diagonal\" do\n    arr = [1,2,3,4]\n    m = NMatrix.diag(arr)\n    expect(m.is_a?(NMatrix)).to be true\n  end\n\n  it \"diagonals() contains the seeded values on the diagonal\" do\n    arr = [1,2,3,4]\n    m = NMatrix.diagonals(arr)\n    expect(m[0,0]).to eq(arr[0])\n    expect(m[1,1]).to eq(arr[1])\n    expect(m[2,2]).to eq(arr[2])\n    expect(m[3,3]).to eq(arr[3])\n  end\n\n  ALL_DTYPES.each do |dtype|\n    [:dense, :yale, :list].each do |stype|\n      context \"#block_diagonal #{dtype} #{stype}\" do\n        it \"block_diagonal() creates a block-diagonal NMatrix\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby? 
and dtype == :object\n          a = NMatrix.new([2,2], [1,2,\n                                  3,4])\n          b = NMatrix.new([1,1], [123.0])\n          c = NMatrix.new([3,3], [1,2,3,\n                                  1,2,3,\n                                  1,2,3])\n          d = Array[ [1,1,1], [2,2,2], [3,3,3] ]\n          e = 12\n          m = NMatrix.block_diagonal(a, b, c, d, e, dtype: dtype, stype: stype)\n          expect(m).to eq(NMatrix.new([10,10], [1, 2,   0, 0, 0, 0, 0, 0, 0,  0,\n                                                3, 4,   0, 0, 0, 0, 0, 0, 0,  0,\n                                                0, 0, 123, 0, 0, 0, 0, 0, 0,  0,\n                                                0, 0,   0, 1, 2, 3, 0, 0, 0,  0,\n                                                0, 0,   0, 1, 2, 3, 0, 0, 0,  0,\n                                                0, 0,   0, 1, 2, 3, 0, 0, 0,  0,\n                                                0, 0,   0, 0, 0, 0, 1, 1, 1,  0,\n                                                0, 0,   0, 0, 0, 0, 2, 2, 2,  0,\n                                                0, 0,   0, 0, 0, 0, 3, 3, 3,  0,\n                                                0, 0,   0, 0, 0, 0, 0, 0, 0, 12], dtype: dtype, stype: stype))\n        end\n      end\n    end\n  end\n\n  context \"::random\" do\n    it \"creates a matrix of random numbers\" do\n      m = NMatrix.random(2)\n\n      expect(m.stype).to eq(:dense)\n      expect(m.dtype).to eq(:float64)\n    end\n\n    it \"creates a matrix of random numbers with defined seed value\" do\n      m1 = NMatrix.random(2,:seed => 62)\n      m2 = NMatrix.random(2,:seed => 62)\n      m3 = NMatrix.random(2,:seed => 65)\n\n\n      expect(m1).to eq(m2)\n      expect(m1).not_to eq(m3)\n\n    end\n\n    it \"creates a complex matrix of random numbers\" do\n      m = NMatrix.random(2, :dtype => :complex128)\n    end\n\n    it \"correctly accepts :scale parameter\" do\n      m = NMatrix.random([2,2], dtype: 
:byte, scale: 255)\n      m.each do |v|\n        expect(v).to be >= 0\n        expect(v).to be < 255\n      end\n    end\n\n    it \"only accepts an integer or an array as dimension\" do\n      m = NMatrix.random([2, 2])\n\n      expect(m.stype).to eq(:dense)\n      expect(m.dtype).to eq(:float64)\n\n      expect { NMatrix.random(2.0) }.to raise_error\n      expect { NMatrix.random(\"not an array or integer\") }.to raise_error\n    end\n  end\n\n  context \"::magic\" do\n\n    ALL_DTYPES.each do |dtype|\n      context dtype do\n        it \"creates a matrix with numbers from 1 to n^n(n squared)\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          a = NMatrix.magic(3, dtype: dtype)\n          magic3 = NMatrix.new([3,3], [4, 9, 2, 3, 5, 7, 8, 1, 6], dtype: dtype)\n          expect(a).to eq magic3\n\n          b = NMatrix.magic(4, dtype: dtype)\n          magic4 = NMatrix.new([4,4], [1, 15, 14,  4, 12,  6,  7, 9, 8, 10, 11, 5, 13, 3, 2, 16], dtype: dtype)\n          expect(b).to eq magic4\n\n          c = NMatrix.magic(6, dtype: dtype)\n          magic6 = NMatrix.new([6,6], [31, 9, 2, 22, 27, 20, 3, 32, 7, 21, 23, 25, 35, 1, 6, 26, 19, 24, 4, 36, 29, 13, 18, 11, 30, 5, 34, 12, 14, 16, 8, 28, 33, 17, 10, 15], dtype: dtype)\n          expect(c).to eq magic6\n        end\n      end\n    end\n\n    it \"shape of two is not allowed\" do\n      expect { NMatrix.magic(2) }.to raise_error(ArgumentError)\n    end\n\n    it \"Only accepts an integer as dimension\" do\n      expect { NMatrix.magic(3.0) }.to raise_error(ArgumentError)\n    end\n  end\n\n  context \"::linspace\" do\n    it \"creates a row vector when given only one shape parameter\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      v = NMatrix.linspace(1, 10, 4)\n      #Expect a row vector only\n      expect(v.shape.length).to eq(1)\n\n      ans = [1.0,4.0,7.0,10.0]\n\n      expect(v[0]).to be_within(0.000001).of(ans[0])\n      expect(v[1]).to 
be_within(0.000001).of(ans[1])\n      expect(v[2]).to be_within(0.000001).of(ans[2])\n      expect(v[3]).to be_within(0.000001).of(ans[3])\n    end\n\n    it \"creates a matrix of input shape with each entry linearly spaced in row major order\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      v = NMatrix.linspace(1, Math::PI, [2,2])\n      expect(v.dtype).to eq(:float64)\n\n      ans = [1.0, 1.7138642072677612, 2.4277284145355225, 3.1415927410125732]\n\n      expect(v[0,0]).to be_within(0.000001).of(ans[0])\n      expect(v[0,1]).to be_within(0.000001).of(ans[1])\n      expect(v[1,0]).to be_within(0.000001).of(ans[2])\n      expect(v[1,1]).to be_within(0.000001).of(ans[3])\n    end\n  end\n\n  context \"::logspace\" do\n    it \"creates a logarithmically spaced vector\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      v = NMatrix.logspace(1, 2, 10)\n\n      expect(v.shape.length).to eq(1)\n\n      #Unit test taken from Matlab R2015b output of logspace(1,2,10)\n      ans = [10.0000, 12.9155, 16.6810, 21.5443, 27.8256, 35.9381, 46.4159, 59.9484, 77.4264, 100.0000]\n\n      expect(v[0].round(4)).to be_within(0.000001).of(ans[0])\n      expect(v[1].round(4)).to be_within(0.000001).of(ans[1])\n      expect(v[2].round(4)).to be_within(0.000001).of(ans[2])\n      expect(v[3].round(4)).to be_within(0.000001).of(ans[3])\n      expect(v[4].round(4)).to be_within(0.000001).of(ans[4])\n      expect(v[5].round(4)).to be_within(0.000001).of(ans[5])\n      expect(v[6].round(4)).to be_within(0.000001).of(ans[6])\n      expect(v[7].round(4)).to be_within(0.000001).of(ans[7])\n      expect(v[8].round(4)).to be_within(0.000001).of(ans[8])\n      expect(v[9].round(4)).to be_within(0.000001).of(ans[9])\n    end\n\n    it \"creates a logarithmically spaced vector bounded by Math::PI if :pi is pre-supplied\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      v = NMatrix.logspace(1, :pi, 7)\n\n      #Unit 
test taken from Matlab R2015b output of logspace(1,pi,10)\n      ans = [10.0000, 8.2450, 6.7980, 5.6050, 4.6213, 3.8103, 3.1416]\n\n      expect(v[0].round(4)).to be_within(0.000001).of(ans[0])\n      expect(v[1].round(4)).to be_within(0.000001).of(ans[1])\n      expect(v[2].round(4)).to be_within(0.000001).of(ans[2])\n      expect(v[3].round(4)).to be_within(0.000001).of(ans[3])\n      expect(v[4].round(4)).to be_within(0.000001).of(ans[4])\n      expect(v[5].round(4)).to be_within(0.000001).of(ans[5])\n      expect(v[6].round(4)).to be_within(0.000001).of(ans[6])\n    end\n\n    it \"creates a matrix of input shape with each entry logarithmically spaced in row major order\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      v = NMatrix.logspace(1, 2, [3,2])\n\n      ans = [10.0, 15.8489, 25.1189, 39.8107, 63.0957, 100.0]\n\n      expect(v[0,0].round(4)).to be_within(0.000001).of(ans[0])\n      expect(v[0,1].round(4)).to be_within(0.000001).of(ans[1])\n      expect(v[1,0].round(4)).to be_within(0.000001).of(ans[2])\n      expect(v[1,1].round(4)).to be_within(0.000001).of(ans[3])\n      expect(v[2,0].round(4)).to be_within(0.000001).of(ans[4])\n      expect(v[2,1].round(4)).to be_within(0.000001).of(ans[5])\n    end\n  end\n\n  it \"seq() creates a matrix of integers, sequentially\" do\n    m = NMatrix.seq(2) # 2x2 matrix.\n    value = 0\n\n    2.times do |i|\n      2.times do |j|\n        expect(m[i,j]).to eq(value)\n        value += 1\n      end\n    end\n  end\n\n  it \"indgen() creates a matrix of integers as well as seq()\" do\n    m = NMatrix.indgen(2) # 2x2 matrix.\n    value = 0\n\n    2.times do |i|\n      2.times do |j|\n        expect(m[i, j]).to eq(value)\n        value += 1\n      end\n    end\n  end\n\n  it \"findgen creates a matrix of floats, sequentially\" do\n    m = NMatrix.findgen(2) # 2x2 matrix.\n    value = 0\n\n    2.times do |i|\n      2.times do |j|\n        expect(m[i, j]/10).to 
be_within(Float::EPSILON).of(value.to_f/10)\n        value += 1\n      end\n    end\n  end\n\n  it \"bindgen() creates a matrix of bytes\" do\n    m = NMatrix.bindgen(2) # 2x2 matrix.\n    value = 0\n\n    2.times do |i|\n      2.times do |j|\n        expect(m[i, j]).to eq(value)\n        value += 1\n      end\n    end\n  end\n\n  it \"cindgen() creates a matrix of complexes\" do\n    m = NMatrix.cindgen(2) # 2x2 matrix.\n    value = 0\n\n    2.times do |i|\n      2.times do |j|\n        expect(m[i, j].real).to be_within(Float::EPSILON).of(value)\n        expect(m[i, j].imag).to be_within(Float::EPSILON).of(0.0)\n        value += 1\n      end\n    end\n  end\n\n  it \"column() returns a NMatrix\" do\n    m = NMatrix.random(3)\n\n    expect(m.column(2).is_a?(NMatrix)).to be true\n  end\n\n  it \"row() returns a NMatrix\" do\n    m = NMatrix.random(3)\n\n    expect(m.row(2).is_a?(NMatrix)).to be true\n  end\n\n  it \"diagonals() creates an NMatrix\" do\n    arr = [1,2,3,4]\n    m = NMatrix.diagonals(arr)\n    expect(m.is_a?(NMatrix)).to be true\n  end\n\n  it \"diagonals() contains the seeded values on the diagonal\" do\n    arr = [1,2,3,4]\n    m = NMatrix.diagonals(arr)\n    expect(m[0,0]).to eq(arr[0])\n    expect(m[1,1]).to eq(arr[1])\n    expect(m[2,2]).to eq(arr[2])\n    expect(m[3,3]).to eq(arr[3])\n  end\n\n  context \"_like constructors\" do\n    before :each do\n      @nm_1d = NMatrix[5.0,0.0,1.0,2.0,3.0]\n      @nm_2d = NMatrix[[0.0,1.0],[2.0,3.0]]\n    end\n\n    it \"should create an nmatrix of ones with dimensions and type the same as its argument\" do\n      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n      expect(NMatrix.ones_like(@nm_1d)).to eq NMatrix[1.0, 1.0, 1.0, 1.0, 1.0]\n      expect(NMatrix.ones_like(@nm_2d)).to eq NMatrix[[1.0, 1.0], [1.0, 1.0]]\n    end\n\n    it \"should create an nmatrix of zeros with dimensions and type the same as its argument\" do\n      expect(NMatrix.zeros_like(@nm_1d)).to eq NMatrix[0.0, 0.0, 0.0, 
0.0, 0.0]\n      expect(NMatrix.zeros_like(@nm_2d)).to eq NMatrix[[0.0, 0.0], [0.0, 0.0]]\n    end\n  end\n\nend\n\ndescribe \"NVector\" do\n\n  it \"zeros() creates a vector of zeros\" do\n    v = NVector.zeros(4)\n\n    4.times do |i|\n      expect(v[i]).to eq(0)\n    end\n  end\n\n  it \"ones() creates a vector of ones\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    v = NVector.ones(3)\n\n    3.times do |i|\n      expect(v[i]).to eq(1)\n    end\n  end\n\n  it \"random() creates a vector of random numbers\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    v = NVector.random(4)\n    expect(v.dtype).to eq(:float64)\n    expect(v.stype).to eq(:dense)\n  end\n\n  it \"seq() creates a vector of integers, sequentially\" do\n    v = NVector.seq(7)\n    expect(v).to eq(NMatrix.new([7,1], [0, 1, 2, 3, 4, 5, 6]))\n  end\n\n  it \"seq() only accepts integers as dimension\" do\n    expect { NVector.seq(3) }.to_not raise_error\n\n    expect { NVector.seq([1, 3]) }.to raise_error\n    expect { NVector.seq(:wtf) }.to raise_error\n  end\n\n  it \"indgen() creates a vector of integers as well as seq()\" do\n    v = NVector.indgen(7)\n    expect(v).to eq(NMatrix.new([7,1], [0, 1, 2, 3, 4, 5, 6]))\n  end\n\n  it \"findgen creates a vector of floats, sequentially\" do\n    v = NVector.findgen(2)\n    expect(v).to eq(NMatrix.new([2,1], [0.0, 1.0]))\n  end\n\n  it \"bindgen() creates a vector of bytes, sequentially\" do\n    v = NVector.bindgen(4)\n    expect(v).to eq(NMatrix.new([4,1], [0, 1, 2, 3], dtype: :byte))\n  end\n\n  it \"cindgen() creates a vector of complexes, sequentially\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    v = NVector.cindgen(2)\n    expect(v).to eq(NMatrix.new([2,1], [Complex(0.0, 0.0), Complex(1.0, 0.0)], dtype: :complex64))\n  end\n\n  it \"linspace() creates a vector with n values equally spaced between a and b\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if 
jruby?\n    v = NVector.linspace(0, 2, 5)\n    expect(v).to eq(NMatrix.new([5,1], [0.0, 0.5, 1.0, 1.5, 2.0]))\n  end\n\n  it \"logspace() creates a vector with n values logarithmically spaced between decades 10^a and 10^b\" do\n    pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n    v = NVector.logspace(0, 3, 4)\n    expect(v).to eq(NMatrix.new([4,1], [1.0, 10.0, 100.0, 1000.0]))\n  end\nend\n\ndescribe \"Inline constructor\" do\n\n  it \"creates a NMatrix with the given values\" do\n    m = NMatrix.new([2, 2], [1, 4, 6, 7])\n    n = NMatrix[[1, 4], [6, 7]]\n\n    expect(m).to eq n\n  end\nend\n"
  },
  {
    "path": "spec/slice_set_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == slice_set_spec.rb\n#\n# Test of slice set operations.\n\nrequire 'spec_helper'\nrequire 'pry'\n\ndescribe \"Set slice operation\" do\n  include RSpec::Longrun::DSL\n\n  [:dense, :yale, :list].each do |stype|\n    context \"for #{stype}\" do\n      before :each do\n        @m = create_matrix(stype)\n      end\n\n      example \"set and unset a range of entries with single values\" do\n\n        if stype == :yale\n          step \"verify correct arrangement of Yale IJA and A arrays\" do\n            @m.extend NMatrix::YaleFunctions unless jruby?\n            if jruby?\n              pending(\"not yet implemented for NMatrix-JRuby\")\n            else\n              expect(@m.yale_ija).to eq([4,6,8,10,1,2,0,2,0,1])\n            end\n            expect(@m.yale_a).to   eq([0,4,8,0, 1,2,3,5,6,7])\n          end\n        end\n\n        step \"set and reset a single entry\" do\n          n = @m.clone\n          old_val = @m[0,0]\n          @m[0,0] = 100\n          expect(@m[0,0]).to eq(100)\n          @m[0,0] = old_val\n          expect(@m).to eq(n)\n        end\n\n        if stype == :yale\n          n = @m.clone\n          step \"set a row of entries\" do\n            n[0,0..2] = 0\n            expect(n[0,0..2].to_flat_array).to eq([0,0,0])\n            expect(n[1,0..2].to_flat_array).to 
eq([3,4,5])\n            expect(n[2,0..2].to_flat_array).to eq([6,7,8])\n          end\n\n          step \"set a second row of entries\" do\n            n[2,0..2] = 0\n            expect(n[2,0..2].to_flat_array).to eq([0,0,0])\n            expect(n[1,0..2].to_flat_array).to eq([3,4,5])\n          end\n\n          step \"reset both rows of entries\" do\n            n[0,0..2] = [0,1,2]\n            n[2,0..2] = [6,7,8]\n            expect(n).to eq(@m)\n          end\n        end\n\n        slice_result_a = NMatrix.new(:dense, 2, 100, @m.dtype).cast(stype)\n        slice_result_b = NMatrix.new(:dense, 2, 0,   @m.dtype).cast(stype)\n        m = @m.clone\n\n        step \"set upper left-hand 2x2 corner to 100\" do\n          m[0..1,0..1] = 100\n\n          if stype == :yale\n            expect(m.yale_ija).to eq([4,   6,   8,   10,   1,   2,   0,   2,  0,  1])\n            expect(m.yale_a).to   eq([100, 100, 8,   0,   100,  2, 100,   5,  6,  7])\n          end\n\n          expect(m[0..1,0..1]).to eq(slice_result_a)\n          expect(m[2,0..1]).to eq(@m[2,0..1])\n          expect(m[0..1,2]).to eq(@m[0..1,2])\n        end\n\n        step \"set upper left-hand 2x2 corner to 0\" do\n          m[0..1,0..1] = 0\n          if stype == :yale\n            expect([4,5,6,8,2,2,0,1]).to eq(m.yale_ija)\n            expect([0,0,8,0,2,5,6,7]).to eq(m.yale_a)\n          end\n\n          expect(m[0..1,0..1]).to eq(slice_result_b)\n        end\n\n        m = @m.clone\n        step \"set lower left-hand 2x2 corner to 100\" do\n          m[1..2,0..1] = 100\n          expect(m[1..2,0..1]).to eq(slice_result_a)\n          expect(m[0,0..1]).to eq(@m[0,0..1])\n          expect(m[1..2,2]).to eq(@m[1..2,2])\n        end\n\n        step \"set lower left-hand 2x2 corner to 0\" do\n          m[1..2,0..1] = 0\n          expect(m[1..2,0..1]).to eq(slice_result_b)\n        end\n\n        m = @m.clone\n        step \"set lower right-hand 2x2 corner to 100\" do\n          m[1..2,1..2] = 100\n          
expect(m[1..2,1..2]).to eq(slice_result_a)\n          expect(m[0,1..2]).to eq(@m[0,1..2])\n          expect(m[1..2,0]).to eq(@m[1..2,0])\n        end\n\n        step \"set lower right-hand 2x2 corner to 0\" do\n          m[1..2,1..2] = 0\n          expect(m[1..2,1..2]).to eq(slice_result_b)\n        end\n\n        m = @m.clone\n        step \"set upper right-hand 2x2 corner to 100\" do\n          m[0..1,1..2] = 100\n          expect(m[0..1,1..2]).to eq(slice_result_a)\n          expect(m[2,1..2]).to eq(@m[2,1..2])\n          expect(m[0..1,0]).to eq(@m[0..1,0])\n        end\n\n        step \"set upper right-hand 2x2 corner to 0\" do\n          m[0..1,1..2] = 0\n          expect(m[0..1,1..2]).to eq(slice_result_b)\n        end\n      end\n\n      example \"set a range of values to a matrix's contents\" do\n        pending(\"not yet implemented for int dtype for NMatrix-JRuby\") if jruby?\n        x = NMatrix.new(4, stype: :yale, dtype: :int16)\n        x.extend NMatrix::YaleFunctions if stype == :yale\n        x[1..3,1..3] = @m\n        expect(x.to_flat_array).to eq([0,0,0,0, 0,0,1,2, 0,3,4,5, 0,6,7,8])\n      end\n\n    end\n  end\n\nend\n"
  },
  {
    "path": "spec/spec_helper.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == spec_helper.rb\n#\n# Common data and helper functions for testing.\n\nrequire \"rspec/longrun\"\n#require \"narray/narray\"\n\nrequire \"./lib/nmatrix\"\nrequire \"./lib/nmatrix/rspec\"\n\nALL_DTYPES = [:byte,:int8,:int16,:int32,:int64, :float32,:float64, :object,\n  :complex64, :complex128]\n  \nNON_INTEGER_DTYPES = [:float32, :float64, :complex64, :complex128,\n  :object]\n\nFLOAT_DTYPES = [:float32, :float64]\n  \nMATRIX43A_ARRAY = [14.0, 9.0, 3.0, 2.0, 11.0, 15.0, 0.0, 12.0, 17.0, 5.0, 2.0, 3.0]\nMATRIX32A_ARRAY = [12.0, 25.0, 9.0, 10.0, 8.0, 5.0]\n\nCOMPLEX_MATRIX43A_ARRAY = MATRIX43A_ARRAY.zip(MATRIX43A_ARRAY.reverse).collect { |ary| Complex(ary[0], ary[1]) }\nCOMPLEX_MATRIX32A_ARRAY = MATRIX32A_ARRAY.zip(MATRIX32A_ARRAY.reverse).collect { |ary| Complex(ary[0], -ary[1]) }\n\n#3x4 matrix used for testing various getrf and LU decomposition functions\nGETRF_EXAMPLE_ARRAY = [-1,0,10,4,9,2,3,5,7,8,1,6]\nGETRF_SOLUTION_ARRAY = [9.0, 2.0, 3.0, 5.0, 7.0/9, 58.0/9, -4.0/3, 19.0/9, -1.0/9, 1.0/29, 301.0/29, 130.0/29]\n\nTAU_SOLUTION_ARRAY = [1.8571428571428572,1.9938461538461538, 0.0]\n\nGEQRF_SOLUTION_ARRAY =[                -14.0,                -21.0, 14.000000000000002,\n                         0.23076923076923078,  -175.00000000000003,  70.00000000000001,\n                        
-0.15384615384615385, 0.055555555555555546,              -35.0]\n\nR_SOLUTION_ARRAY   = [-159.2388143638353, -41.00131005172065, -56.75123892439876,  -90.75048729628048, \n                                     0.0, 25.137473501580676,  2.073591725046292,   9.790607357775713, \n                                     0.0,                0.0, -20.83259700334131, -17.592414929551445]\n\nQ_SOLUTION_ARRAY_1 = [-0.632455532033676, -0.5209522876558295, -0.3984263084135902,  -0.41214704991068,\n                    -0.42783756578748666, -0.20837937347171134, 0.876505919951498, 0.07259770177184455,\n                    -0.48364246567281094, 0.8265854747306287,-0.015758658987033422, -0.2873988222474053,\n                    -0.42783756578748666,  0.044081783789183565, -0.26971376257215296, 0.8615487797670971]\n\nQ_SOLUTION_ARRAY_2 = [-0.8571428571428572,   0.3942857142857143,  0.33142857142857146, \n                      -0.4285714285714286,  -0.9028571428571428, -0.03428571428571425, \n                       0.28571428571428575, -0.1714285714285714,   0.9428571428571428]\n\nQ_SOLUTION_ARRAY_3 = [-0.7724247413634004, -0.026670393594597247, -0.6345460653374136, \n                      -0.5777485870360393,  -0.38541856437557026,  0.7194853024298236,\n                      -0.26375478973384403,   0.9223563413020934, 0.28229805268947933]\n\ndef create_matrix(stype) #:nodoc:\n  m = NMatrix.new([3,3], 0, dtype: :int32, stype: stype, default: 0)\n\n  m[0,0] = 0\n  m[0,1] = 1\n  m[0,2] = 2\n  m[1,0] = 3\n  m[1,1] = 4\n  m[1,2] = 5\n  m[2,0] = 6\n  m[2,1] = 7\n  m[2,2] = 8\n\n  m\nend\n\ndef create_rectangular_matrix(stype) #:nodoc:\n  m = NMatrix.new([5,6], 0, dtype: :int32, stype: stype, default: 0)\n\n  m[0,0] = 1\n  m[0,1] = 2\n  m[0,2] = 3\n  m[0,3] = 4\n  m[0,4] = 5\n  m[0,5] = 0\n\n  m[1,0] = 6\n  m[1,1] = 7\n  m[1,2] = 8\n  m[1,3] = 9\n  m[1,4] = 0\n  m[1,5] = 10\n\n  m[2,0] = 11\n  m[2,1] = 12\n  m[2,2] = 13\n  m[2,3] = 0\n  m[2,4] = 14\n  m[2,5] = 15\n\n  # skip row 3 -- all 0\n 
 m[3,0] = m[3,1] = m[3,2] = m[3,3] = m[3,4] = m[3,5] = 0\n\n  m[4,0] = 16\n  m[4,1] = 0\n  m[4,2] = 17\n  m[4,3] = 18\n  m[4,4] = 19\n  m[4,5] = 20\n\n  m\nend\n\ndef create_vector(stype) #:nodoc:\n  m = stype == :yale ? NVector.new(stype, 10, :int32) : NVector.new(stype, 10, 0, :int32)\n\n  m[0] = 1\n  m[1] = 2\n  m[2] = 3\n  m[3] = 4\n  m[4] = 5\n  m[5] = 6\n  m[6] = 7\n  m[7] = 8\n  m[8] = 9\n  m[9] = 10\n\n  m\nend\n\n# Stupid but independent comparison for slice_spec\ndef nm_eql(n, m) #:nodoc:\n  if n.shape != m.shape\n    false\n  else # NMatrix\n    n.shape[0].times do |i|\n      n.shape[1].times do |j|\n        if n[i,j] != m[i,j]\n          puts \"n[#{i},#{j}] != m[#{i},#{j}] (#{n[i,j]} != #{m[i,j]})\"\n          return false\n        end\n      end\n    end\n  end\n  true\nend\n\ndef integer_dtype? dtype\n  [:byte,:int8,:int16,:int32,:int64].include?(dtype)\nend\n\n# If a focus: true option is supplied to any test, running `rake spec focus=true`\n# will run only the focused tests and nothing else.\nif ENV[\"focus\"] == \"true\"\n  RSpec.configure do |c|\n    c.filter_run :focus => true\n  end\nend\n\n"
  },
  {
    "path": "spec/stat_spec.rb",
    "content": "# = NMatrix\n#\n# A linear algebra library for scientific computation in Ruby.\n# NMatrix is part of SciRuby.\n#\n# NMatrix was originally inspired by and derived from NArray, by\n# Masahiro Tanaka: http://narray.rubyforge.org\n#\n# == Copyright Information\n#\n# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation\n# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation\n#\n# Please see LICENSE.txt for additional copyright notices.\n#\n# == Contributing\n#\n# By contributing source code to SciRuby, you agree to be bound by\n# our Contributor Agreement:\n#\n# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement\n#\n# == stat_spec.rb\n#\n# Tests for statistical functions in NMatrix.\n#\n\nrequire 'spec_helper'\nrequire 'pry'\n\ndescribe \"Statistical functions\" do\n  context \"mapping and reduction related functions\" do\n    [:dense, :yale, :list].each do |stype|\n      context \"on #{stype} matrices\" do\n        let(:nm_1d) { NMatrix.new([5], [5.0,0.0,1.0,2.0,3.0], stype: stype) unless stype == :yale }\n        let(:nm_2d) { NMatrix.new([2,2], [0.0, 1.0, 2.0, 3.0], stype: stype) }\n\n        it \"behaves like Enumerable#reduce with no argument to reduce\" do\n          expect(nm_1d.reduce_along_dim(0) { |acc, el| acc + el }.to_f).to eq 11 unless stype == :yale\n          expect(nm_2d.reduce_along_dim(1) { |acc, el| acc + el }).to eq NMatrix.new([2,1], [1.0, 5.0], stype: stype)\n        end\n\n        it \"should calculate the mean along the specified dimension\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          unless stype == :yale then\n            puts nm_1d.mean\n            expect(nm_1d.mean).to eq NMatrix.new([1], [2.2], stype: stype, dtype: :float64)\n          end\n          expect(nm_2d.mean).to eq NMatrix[[1.0,2.0], stype: stype]\n          expect(nm_2d.mean(1)).to eq NMatrix[[0.5], [2.5], stype: stype]\n        end\n\n        it \"should calculate the 
minimum along the specified dimension\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          expect(nm_1d.min).to eq 0.0 unless stype == :yale\n          expect(nm_2d.min).to eq NMatrix[[0.0, 1.0], stype: stype]\n          expect(nm_2d.min(1)).to eq NMatrix[[0.0], [2.0], stype: stype]\n        end\n\n        it \"should calculate the maximum along the specified dimension\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          expect(nm_1d.max).to eq 5.0  unless stype == :yale\n          expect(nm_2d.max).to eq NMatrix[[2.0, 3.0], stype: stype]\n        end\n\n        it \"should calculate the variance along the specified dimension\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          expect(nm_1d.variance).to eq NMatrix[3.7, stype: stype] unless stype == :yale\n          expect(nm_2d.variance(1)).to eq NMatrix[[0.5], [0.5], stype: stype]\n        end\n\n        it \"should calculate the sum along the specified dimension\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          expect(nm_1d.sum).to eq NMatrix[11.0, stype: stype] unless stype == :yale\n          expect(nm_2d.sum).to eq NMatrix[[2.0, 4.0], stype: stype]\n        end\n\n        it \"should calculate the standard deviation along the specified dimension\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          expect(nm_1d.std).to eq NMatrix[Math.sqrt(3.7), stype: stype] unless stype == :yale\n          expect(nm_2d.std(1)).to eq NMatrix[[Math.sqrt(0.5)], [Math.sqrt(0.5)], stype: stype]\n        end\n\n        it \"should raise an ArgumentError when any invalid dimension is provided\" do\n          expect { nm_1d.mean(3) }.to raise_exception(RangeError) unless stype == :yale\n          expect { nm_2d.mean(3) }.to raise_exception(RangeError)\n        end\n\n        it \"should convert to float if it contains only a single element\" do\n          
expect(NMatrix[4.0, stype: stype].to_f).to eq 4.0  unless stype == :yale\n          expect(NMatrix[[[[4.0]]], stype: stype].to_f).to eq 4.0  unless stype == :yale\n          expect(NMatrix[[4.0], stype: stype].to_f).to eq 4.0\n        end\n\n        it \"should raise an index error if it contains more than a single element\" do\n          expect { nm_1d.to_f }.to raise_error(IndexError)  unless stype == :yale\n          expect { nm_2d.to_f }.to raise_error(IndexError)\n        end\n\n        it \"should map a block to all elements\" do\n          expect(nm_1d.map { |e| e ** 2 }).to eq NMatrix[25.0,0.0,1.0,4.0,9.0, stype: stype] unless stype == :yale\n          expect(nm_2d.map { |e| e ** 2 }).to eq NMatrix[[0.0,1.0],[4.0,9.0], stype: stype]\n        end\n\n        it \"should map! a block to all elements in place\" do\n          fct = Proc.new { |e| e ** 2 }\n          unless stype == :yale then\n            expected1 = nm_1d.map(&fct)\n            nm_1d.map!(&fct)\n            expect(nm_1d).to eq expected1\n          end\n          expected2 = nm_2d.map(&fct)\n          nm_2d.map!(&fct)\n          expect(nm_2d).to eq expected2\n        end\n\n        it \"should return an enumerator for map without a block\" do\n          expect(nm_2d.map).to be_a Enumerator\n        end\n\n        it \"should return an enumerator for reduce without a block\" do\n          expect(nm_2d.reduce_along_dim(0)).to be_a Enumerator\n        end\n\n        it \"should return an enumerator for each_along_dim without a block\" do\n          expect(nm_2d.each_along_dim(0)).to be_a Enumerator\n        end\n\n        it \"should iterate correctly for map without a block\" do\n          en = nm_1d.map unless stype == :yale\n          expect(en.each { |e| e**2 }).to eq nm_1d.map { |e| e**2 } unless stype == :yale\n          en = nm_2d.map\n          expect(en.each { |e| e**2 }).to eq nm_2d.map { |e| e**2 }\n        end\n\n        it \"should iterate correctly for reduce without a block\" do\n    
      pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          unless stype == :yale then\n            en = nm_1d.reduce_along_dim(0, 1.0)\n            expect(en.each { |a, e| a+e }.to_f).to eq 12\n          end\n          en = nm_2d.reduce_along_dim(1, 1.0)\n          expect(en.each { |a, e| a+e }).to eq NMatrix[[2.0],[6.0], stype: stype]\n        end\n\n        it \"should iterate correctly for each_along_dim without a block\" do\n          unless stype == :yale then\n            res = NMatrix.zeros_like(nm_1d[0...1])\n            en = nm_1d.each_along_dim(0)\n            en.each { |e| res += e }\n            expect(res.to_f).to eq 11\n          end\n          res = NMatrix.zeros_like (nm_2d[0...2, 0])\n          en = nm_2d.each_along_dim(1)\n          en.each { |e| res += e }\n          expect(res).to eq NMatrix[[1.0], [5.0], stype: stype]\n        end\n\n        it \"should yield matrices of matching dtype for each_along_dim\" do\n          m = NMatrix.new([2,3], [1,2,3,3,4,5], dtype: :complex128, stype: stype)\n          m.each_along_dim(1) do |sub_m|\n            expect(sub_m.dtype).to eq :complex128\n          end\n        end\n\n        it \"should reduce to a matrix of matching dtype for reduce_along_dim\" do\n          m = NMatrix.new([2,3], [1,2,3,3,4,5], dtype: :complex128, stype: stype)\n          m.reduce_along_dim(1) do |acc, sub_m|\n            expect(sub_m.dtype).to eq :complex128\n            acc\n          end\n\n          m.reduce_along_dim(1, 0.0) do |acc, sub_m|\n            expect(sub_m.dtype).to eq :complex128\n            acc\n          end\n        end\n\n        it \"should allow overriding the dtype for reduce_along_dim\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          m = NMatrix[[1,2,3], [3,4,5], dtype: :complex128]\n          m.reduce_along_dim(1, 0.0, :float64) do |acc, sub_m|\n            expect(acc.dtype).to eq :float64\n            acc\n          end\n\n          m = 
NMatrix[[1,2,3], [3,4,5], dtype: :complex128, stype: stype]\n          m.reduce_along_dim(1, nil, :float64) do |acc, sub_m|\n            expect(acc.dtype).to eq :float64\n            acc\n          end\n        end\n\n        it \"should convert integer dtypes to float when calculating mean\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          m = NMatrix[[1,2,3], [3,4,5], dtype: :int32, stype: stype]\n          expect(m.mean(0).dtype).to eq :float64\n        end\n\n        it \"should convert integer dtypes to float when calculating variance\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          m = NMatrix[[1,2,3], [3,4,5], dtype: :int32, stype: stype]\n          expect(m.variance(0).dtype).to eq :float64\n        end\n\n        it \"should convert integer dtypes to float when calculating standard deviation\" do\n          pending(\"not yet implemented for NMatrix-JRuby\") if jruby?\n          m = NMatrix[[1,2,3], [3,4,5], dtype: :int32, stype: stype]\n          expect(m.std(0).dtype).to eq :float64\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "spec/test.pcd",
    "content": "VERSION .7\nFIELDS x y z intensity\nSIZE 4 8 8 4\nTYPE F U F I\nCOUNT 1 1 1 1\nWIDTH 256 # comment here to challenge this thing\nHEIGHT 256\nVIEWPOINT 0 0 0 1 0 0 0\nPOINTS 10\nDATA ASCII\n207.008 207.058 1174 0\n207.008 205.441 1174 0\n207.008 203.823 1174 0\n207.008 202.206 1174 0\n207.008 200.589 1174 0\n207.008 198.972 1174 0\n207.008 197.354 1174 0\n207.008 195.737 1174 0\n207.008 194.12 1174 0\n207.008 153.689 1174 0\n"
  },
  {
    "path": "travis.sh",
    "content": "#!/bin/bash\n\nset -ev #fail at the first command that returns non-zero exit value\n\n# Use rbenv on OSX iff ruby_version is given\nif [ -n \"$ruby_version\" -a \"$TRAVIS_OS_NAME\" = \"osx\" ]; then\n  export PATH=\"$HOME/.rbenv/bin:$PATH\"\n  if [ -x $HOME/.rbenv/bin/rbenv ]; then\n    eval \"$(rbenv init -)\"\n  fi\n  export RBENV_VERSION=$ruby_version\n  unset GEM_PATH GEM_HOME\nfi\n\nif [ \"$1\" = \"install\" ]\nthen\n  bundle install --jobs=3 --retry=3 --path=${BUNDLE_PATH:-vendor/bundle}\nfi\n\nif [ \"$1\" = \"before_install\" ]\nthen\n  case \"$TRAVIS_OS_NAME\" in\n    linux)\n      sudo apt-get update -qq\n      ;;\n    osx)\n      brew update >/dev/null\n      ;;\n  esac\n\n  # Installing ruby by using rbenv on OSX iff ruby_version is given\n  if [ -n \"$ruby_version\" -a \"$TRAVIS_OS_NAME\" = \"osx\" ]; then\n    git clone https://github.com/rbenv/rbenv.git ~/.rbenv\n    git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build\n\n    eval \"$(rbenv init -)\"\n\n    # Install ruby\n    (\n      brew install bison openssl readline\n      brew link --force openssl\n      RBENV_VERSION=system\n      MAKEOPTS='-j 4'\n      CONFIGURE_OPTS=\"--disable-install-doc --with-out-ext=tk,tk/tkutil --with-opt-dir=/usr/local\"\n      rbenv install --verbose $ruby_version\n    )\n\n    gem pristine --all\n    gem update --no-document --system\n    gem update --no-document\n  fi\n\n  gem install --no-document bundler -v '~> 1.6'\n\n  if [ -n \"$USE_ATLAS\" ]\n  then\n    case \"$TRAVIS_OS_NAME\" in\n      linux)\n        sudo apt-get install -y libatlas-base-dev\n        ;;\n      osx)\n        echo \"FIXME: ATLAS on OSX environment is not supported, currently\" >2\n        exit 1\n        ;;\n    esac\n  fi\n\n  # travis-ci runs on Ubuntu 12.04, where the openblas package doesn't\n  # provide a liblapack.so, so we test using the blas from openblas\n  # and the reference lapack implementation. 
Not entirely sure if\n  # this will work.\n  if [ -n \"$USE_OPENBLAS\" ]\n  then\n    case \"$TRAVIS_OS_NAME\" in\n      linux)\n        sudo apt-get install -y libopenblas-dev\n        # Since we install libopenblas first, liblapack won't try to install\n        # libblas (the reference BLAS implementation).\n        sudo apt-get install -y liblapack-dev\n        ;;\n      osx)\n        brew install homebrew/science/openblas\n        ;;\n    esac\n  fi\n\n  if [ -n \"$USE_REF\" ]\n  then\n    case \"$TRAVIS_OS_NAME\" in\n      linux)\n        sudo apt-get install -y liblapack-dev\n        ;;\n      osx)\n        brew install homebrew/dupes/lapack\n        ;;\n    esac\n  fi\nfi\n\nif [ \"$1\" = \"script\" ]\nthen\n  nmatrix_plugins_opt=''\n\n  if [ -n \"$USE_ATLAS\" ]\n  then\n    # Need to put these commands on separate lines (rather than use &&)\n    # so that bash set -e will work.\n    nmatrix_plugins_opt='nmatrix_plugins=atlas'\n  fi\n\n  if [ -n \"$USE_OPENBLAS\" ]\n  then\n    nmatrix_plugins_opt='nmatrix_plugins=lapacke'\n  fi\n\n  if [ -n \"$USE_REF\" ]\n  then\n    nmatrix_plugins_opt='nmatrix_plugins=lapacke'\n  fi\n\n  if [ -n \"$NO_EXTERNAL_LIB\" ]\n  then\n    nmatrix_plugins_opt=''\n  fi\n\n  bundle exec rake travis:env\n\n  if [[ \"$TRAVIS_RUBY_VERSION\" =~ \"jruby\" ]];then\n    bundle exec rake jruby\n    bundle exec rake spec\n  else\n    bundle exec rake compile $nmatrix_plugins_opt || {\n      echo === Contents of mkmf.log ===\n      cat tmp/*/nmatrix/*/mkmf.log\n      exit 1\n    }\n    bundle exec rake spec $nmatrix_plugins_opt\n  fi\n\nfi\n"
  }
]