Full Code of PABannier/encodec.cpp for AI

main 1cc279db4da9 cached
27 files
1.2 MB
323.1k tokens
818 symbols
1 requests
Download .txt
Showing preview only (1,259K chars total). Download the full file or copy to clipboard to get everything.
Repository: PABannier/encodec.cpp
Branch: main
Commit: 1cc279db4da9
Files: 27
Total size: 1.2 MB

Directory structure:
gitextract_5181guli/

├── .github/
│   └── workflows/
│       └── build.yml
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── README.md
├── convert.py
├── decoder.h
├── encodec.cpp
├── encodec.h
├── encoder.h
├── examples/
│   ├── CMakeLists.txt
│   ├── README.md
│   ├── common.cpp
│   ├── common.h
│   ├── compress/
│   │   ├── CMakeLists.txt
│   │   └── main.cpp
│   ├── decompress/
│   │   ├── CMakeLists.txt
│   │   └── main.cpp
│   ├── dr_wav.h
│   ├── json.hpp
│   └── main/
│       ├── CMakeLists.txt
│       └── main.cpp
├── lstm.h
├── ops.cpp
├── ops.h
├── quantizer.h
└── utils.h

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/workflows/build.yml
================================================
# CI: build the project with CMake on Linux, macOS and Windows (MSYS2/UCRT64).
name: build

on:
  push:
    branches:
      - main
    # only rebuild when build scripts or source files change
    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu']
  pull_request:
    types: [opened, synchronize, reopened]
    paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', ".github/workflows/**"]

# NOTE(review): the GGML_* variables are not referenced by any step below —
# presumably consumed by ggml's own test harness; confirm they are still needed.
env:
  BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
  GGML_NLOOP: 3
  GGML_NITER: 1
  GGML_N_THREADS: 1

jobs:
  ubuntu-latest-cmake:
    runs-on: ubuntu-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          # full history plus the ggml submodule are needed for the build
          fetch-depth: 0
          submodules: true

      - name: Dependencies
        id: depends
        run: |
          sudo apt-get update
          sudo apt-get install build-essential

      - name: Build
        id: cmake_build
        run: |
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release

  macOS-latest-cmake:
    runs-on: macos-latest

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Dependencies
        id: depends
        # brew update failures are tolerated; the toolchain is preinstalled
        continue-on-error: true
        run: |
          brew update

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release

  windows-msys2:
    runs-on: windows-latest

    strategy:
      fail-fast: false

    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: recursive

      - name: Setup UCRT64
        uses: msys2/setup-msys2@v2
        with:
          update: true
          msystem: UCRT64
          install: >-
            base-devel
            mingw-w64-ucrt-x86_64-toolchain
            mingw-w64-ucrt-x86_64-cmake

      - name: Build using CMake
        shell: msys2 {0}
        run: |
          cmake -B build
          cmake --build build --config Release -j $(nproc)


================================================
FILE: .gitignore
================================================
# macOS debug-symbol bundle produced next to the binary
main.dSYM
# converted GGML model weights
*.bin

encodec
*.o
# original PyTorch checkpoints
*.th
.vscode/

build/

# input/output audio files used for testing
*.wav


================================================
FILE: .gitmodules
================================================
[submodule "ggml"]
	path = ggml
	url = https://github.com/ggerganov/ggml.git


================================================
FILE: CMakeLists.txt
================================================
cmake_minimum_required(VERSION 3.12)
project("encodec" C CXX)

# Default to a Release build on single-config generators when the user did not
# specify a build type. Multi-config generators (Xcode, Visual Studio, Ninja
# Multi-Config) select the configuration at build time, so nothing is forced.
get_property(ENCODEC_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if (NOT ENCODEC_IS_MULTI_CONFIG AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
endif()

# NOTE: per-config compiler flags are left at CMake's defaults. The previous
# `set(CMAKE_CXX_FLAGS_RELEASE "-O3")` override dropped -DNDEBUG (keeping
# asserts enabled in Release builds) and passed a GCC-style flag to MSVC.

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

# Build the examples by default only when encodec is the top-level project.
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(ENCODEC_STANDALONE ON)
else()
    set(ENCODEC_STANDALONE OFF)
endif()

option(ENCODEC_BUILD_EXAMPLES "encodec: build examples" ${ENCODEC_STANDALONE})

# Build libraries

set(ENCODEC_LIB encodec)
option(BUILD_SHARED_LIBS "build shared libraries" OFF)

add_subdirectory(ggml)

add_library(
    ${ENCODEC_LIB}
    encodec.cpp
    encodec.h
    encoder.h
    decoder.h
    quantizer.h
    ops.cpp
    ops.h
    utils.h
    lstm.h
)

target_link_libraries(${ENCODEC_LIB} PUBLIC ggml)
target_include_directories(${ENCODEC_LIB} PUBLIC .)
target_compile_features(${ENCODEC_LIB} PUBLIC cxx_std_11)

# GPU backends: attach the GGML_USE_* macros to the library with PUBLIC
# visibility so consumers (including the examples below) inherit them. The
# previous directory-scoped add_compile_definitions() calls ran after
# add_subdirectory(examples) and therefore never reached the example targets.
if (GGML_CUBLAS)
    target_compile_definitions(${ENCODEC_LIB} PUBLIC GGML_USE_CUBLAS)
endif()

if (GGML_METAL)
    target_compile_definitions(${ENCODEC_LIB} PUBLIC GGML_USE_METAL)
endif()

if (ENCODEC_BUILD_EXAMPLES)
    add_subdirectory(examples)
endif()


================================================
FILE: README.md
================================================
# encodec.cpp

![encodec.cpp](./assets/banner.png)

[![Actions Status](https://github.com/PABannier/encodec.cpp/actions/workflows/build.yml/badge.svg)](https://github.com/PABannier/encodec.cpp/actions)
[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)

High-performance inference of [Meta's Encodec](https://github.com/facebookresearch/encodec) deep learning based audio codec model:

- Plain C/C++ implementation without dependencies using [ggml](https://github.com/ggerganov/ggml)

## Demo

Here is a demo of running Encodec on a single M1 MacBook Pro:

https://github.com/PABannier/encodec.cpp/assets/12958149/d11561be-98e9-4504-bba7-86bcc233a499

## Roadmap

- [x] Support for the 24 kHz model
- [x] Mixed F16 / F32 precision
- [ ] 4-bit and 8-bit quantization
- [ ] Metal support
- [ ] CoreML support

## Implementation details

- The core tensor operations are implemented in C ([ggml.h](ggml.h) / [ggml.c](ggml.c))
- The encoder-decoder architecture and the high-level C-style API are implemented in C++ ([encodec.h](encodec.h) / [encodec.cpp](encodec.cpp))
- Basic usage is demonstrated in [main.cpp](examples/main).

## Usage

Here are the steps to build the project and run inference with the Encodec model.

### Get the code

```bash
git clone --recurse-submodules https://github.com/PABannier/encodec.cpp.git
cd encodec.cpp
```

### Build

In order to build encodec.cpp you must use `CMake`:

```bash
mkdir build
cd build
cmake ..
cmake --build . --config Release
```

### Using Metal

Offloading to the GPU is possible with the Metal backend on macOS. Performance is not improved,
but power consumption and CPU activity are reduced.

```bash
cmake -DGGML_METAL=ON -DBUILD_SHARED_LIBS=Off ..
cmake --build . --config Release
```

### Using cuBLAS

The inference can be offloaded on a CUDA backend with cuBLAS.

```bash
cmake -DGGML_CUBLAS=ON -DBUILD_SHARED_LIBS=Off ..
cmake --build . --config Release
```


================================================
FILE: convert.py
================================================
"""Convert Encodec checkpoint into the GGML format.

The bytes are packed in a binary file in the following order:
    - Magic (`ggml` in binary format)
    - Tensors

For each tensor, the bytes are packed as follows:
    - Number of dimensions    (int)
    - Name length             (int)
    - Dimensions              (int[n_dims])
    - Name                    (char[name_length])
    - Data                    (float[n_elements])

Note
----
Encodec uses weight normalization for its convolutional layers. All the weights are
decomposed into two tensors called with the suffixes _weight_v and _weight_g. A simple
call to the hook torch._weight_norm allows to get the final weight tensor of the
convolution from weight_v and weight_g. To drastically reduce the number of operations
at inference time, the ggml weights file only contain the final convolution weights but
does not store the decomposition into weight_v and weight_g.

Usage
-----

```bash
    python convert.py \
        --dir-model ./ggml_weights/ \
        --out-dir ./ggml_weights/ \
        --use-f16
```
"""
import argparse
from pathlib import Path
import struct

import numpy as np
import torch

# Command-line interface: location of the PyTorch checkpoint, destination
# directory for the GGML file, and whether to store large weights as float16.
parser = argparse.ArgumentParser()
parser.add_argument("--dir-model", type=str, required=True)
parser.add_argument("--out-dir", type=str, required=True)
parser.add_argument("--use-f16", action="store_true")


def parse_codec_model(checkpoint, outfile, use_f16):
    """Serialize the Encodec checkpoint tensors to ``outfile`` in GGML format.

    Args:
        checkpoint: state dict mapping tensor names to torch tensors.
        outfile: writable binary file object; closed before returning.
        use_f16: if True, store eligible weight tensors as float16.
    """
    n_f16, n_f32 = 0, 0

    for name in checkpoint.keys():
        if "weight_g" in name:
            # Magnitude tensors are consumed together with their matching
            # "weight_v" direction tensors below; skip them here.
            continue

        if "inited" in name or "cluster_size" in name or "embed_avg" in name:
            # Quantizer bookkeeping tensors are not used for the forward pass.
            continue

        var_data = checkpoint[name]

        if "weight_v" not in name:
            # Regular tensor: drop singleton dimensions before serialization.
            var_data = var_data.numpy().squeeze()
        else:
            # weight_v/weight_g pair from weight normalization: recombine them
            # into the effective convolution weight so that inference does not
            # have to perform the normalization at runtime.
            base_name = name.split(".")[:-1]
            weight_g_name = ".".join(base_name + ["weight_g"])
            var_data_g = checkpoint[weight_g_name]

            final_var_data = torch._weight_norm(var_data, var_data_g, dim=0)
            var_data = final_var_data.numpy()

            name = ".".join(base_name + ["weight"])

        print(f"Processing variable: {name} with shape: {var_data.shape}")

        # Storage type: codebook embeddings always stay float32; other weight
        # tensors may be stored in float16 when --use-f16 is given.
        if use_f16 and "weight" in name and "embed" not in name:
            print("  Converting to float16")
            var_data = var_data.astype(np.float16)
            ftype_cur = 1
            n_f16 += 1
        else:
            print("  Converting to float32")
            var_data = var_data.astype(np.float32)
            ftype_cur = 0
            n_f32 += 1

        # Header: n_dims, name length, ftype; then dims (innermost first),
        # then the raw name bytes, then the raw tensor data.
        n_dims = len(var_data.shape)
        encoded_name = name.encode("utf-8")
        outfile.write(struct.pack("iii", n_dims, len(encoded_name), ftype_cur))

        for i in range(n_dims):
            outfile.write(struct.pack("i", var_data.shape[n_dims - 1 - i]))
        outfile.write(encoded_name)

        var_data.tofile(outfile)

    outfile.close()

    # Conversion statistics; guard against an empty checkpoint to avoid a
    # ZeroDivisionError.
    n_total = n_f16 + n_f32
    print("\n")
    if n_total > 0:
        print(f"n_f16: {n_f16} ({n_f16/n_total*100:.0f}%)")
        print(f"n_f32: {n_f32} ({n_f32/n_total*100:.0f}%)")


def parse_hparams(outfile, use_f16):
    """Serialize the model hyperparameters to ``outfile``.

    The values are hard-coded for the 24 kHz Encodec model and written as
    32-bit signed integers in the fixed order expected by the C++ loader.
    """
    hparams = (
        1,              # in_channels (mono)
        128,            # hidden_dim
        32,             # n_filters
        7,              # kernel_size
        3,              # residual_kernel_size
        1024,           # n_bins
        24,             # bandwidth
        24000,          # sample rate
        int(use_f16),   # ftype flag
    )
    for value in hparams:
        outfile.write(struct.pack("i", value))


if __name__ == "__main__":
    args = parser.parse_args()

    dir_model = Path(args.dir_model)

    out_dir = Path(args.out_dir)
    out_dir.mkdir(exist_ok=True, parents=True)

    outfile = Path(out_dir / "ggml-model.bin")

    checkpoint = torch.load(dir_model / "encodec_24khz-d7cc33bc.th", map_location="cpu")

    # Step 1: insert ggml magic
    outfile = open(outfile, "wb")
    outfile.write(struct.pack("i", 0x67676d6c))

    # Step 2: insert hyperparameters
    parse_hparams(outfile, args.use_f16)

    # Step 3: insert weights
    parse_codec_model(checkpoint, outfile, args.use_f16)

    print("Done.")


================================================
FILE: decoder.h
================================================
#pragma once

#include <vector>

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#include "lstm.h"
#include "utils.h"


// Weights for one decoder upsampling block: a transposed-convolution
// upsampling stage followed by a residual unit (conv1 -> conv2) whose output
// is added to a shortcut convolution of the upsampled signal.
struct encodec_decoder_block {
    // upsampling layers
    struct ggml_tensor *us_conv_w;
    struct ggml_tensor *us_conv_b;

    // conv1
    struct ggml_tensor *conv_1_w;
    struct ggml_tensor *conv_1_b;

    // conv2
    struct ggml_tensor *conv_2_w;
    struct ggml_tensor *conv_2_b;

    // shortcut
    struct ggml_tensor *conv_sc_w;
    struct ggml_tensor *conv_sc_b;
};

// Full decoder weights: initial convolution, a two-layer LSTM, four
// upsampling blocks, and a final convolution producing the waveform.
struct encodec_decoder {
    // initial convolution applied to the quantized representation
    struct ggml_tensor *init_conv_w;
    struct ggml_tensor *init_conv_b;

    encodec_lstm lstm;

    // final convolution producing the decoded signal
    struct ggml_tensor *final_conv_w;
    struct ggml_tensor *final_conv_b;

    std::vector<encodec_decoder_block> blocks;
};

struct ggml_tensor *encodec_forward_decoder(
    const struct encodec_decoder *decoder, struct ggml_context *ctx0,
    struct ggml_tensor *quantized_out, const int *ratios, const int kernel_size, const int res_kernel_size,
    const int stride) {

    if (!quantized_out) {
        fprintf(stderr, "%s: null input tensor\n", __func__);
        return NULL;
    }

    struct ggml_tensor *inpL = strided_conv_1d(
        ctx0, quantized_out, decoder->init_conv_w, decoder->init_conv_b, stride);

    // lstm
    {
        struct ggml_tensor *cur = inpL;

        const encodec_lstm lstm = decoder->lstm;

        // first lstm layer
        char l0_prefix[7] = "dec_l0";
        struct ggml_tensor *hs1 = forward_pass_lstm_unilayer(
            ctx0, cur, lstm.l0_ih_w, lstm.l0_hh_w, lstm.l0_ih_b, lstm.l0_hh_b, l0_prefix);

        // second lstm layer
        char l1_prefix[7] = "dec_l1";
        struct ggml_tensor *out = forward_pass_lstm_unilayer(
            ctx0, hs1, lstm.l1_ih_w, lstm.l1_hh_w, lstm.l1_ih_b, lstm.l1_hh_b, l1_prefix);

        inpL = ggml_add(ctx0, inpL, out);
    }

    for (int layer_ix = 0; layer_ix < 4; layer_ix++) {
        encodec_decoder_block block = decoder->blocks[layer_ix];

        // upsampling layers
        inpL = ggml_elu(ctx0, inpL);

        inpL = strided_conv_transpose_1d(
            ctx0, inpL, block.us_conv_w, block.us_conv_b, ratios[layer_ix]);

        struct ggml_tensor *current = inpL;

        // shortcut
        struct ggml_tensor *shortcut = strided_conv_1d(
            ctx0, inpL, block.conv_sc_w, block.conv_sc_b, stride);

        // conv1
        current = ggml_elu(ctx0, current);

        current = strided_conv_1d(
            ctx0, current, block.conv_1_w, block.conv_1_b, stride);

        // conv2
        current = ggml_elu(ctx0, current);

        current = strided_conv_1d(
            ctx0, current, block.conv_2_w, block.conv_2_b, stride);

        // residual connection
        inpL = ggml_add(ctx0, current, shortcut);
    }

    // final conv
    inpL = ggml_elu(ctx0, inpL);

    struct ggml_tensor *decoded_inp = strided_conv_1d(
        ctx0, inpL, decoder->final_conv_w, decoder->final_conv_b, stride);

    return decoded_inp;
}


================================================
FILE: encodec.cpp
================================================
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml.h"
#include "ggml/src/ggml-impl.h"

#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#include <cassert>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>
#include <thread>

#include "encodec.h"

#include "decoder.h"
#include "encoder.h"
#include "lstm.h"
#include "ops.h"
#include "utils.h"
#include "quantizer.h"

#define ENCODEC_FILE_MAGIC 'ggml'
#define ENCODEC_MAX_NODES 80000

// Selects which stage(s) of the pipeline are executed.
typedef enum {
    // Run the end-to-end encoder-decoder pipeline
    FULL = 0,
    // Encode an audio (encoder + quantizer encode)
    ENCODE = 1,
    // Decode an audio from a compressed representation (quantizer decode + decoder)
    DECODE = 2,
} encodec_run_mode_t;

// Hyperparameters of the Encodec model. Defaults match the 24 kHz model
// written by convert.py; several fields are overwritten when loading the
// weights file (see encodec_load_model_weights).
struct encodec_hparams {
    // The number of input channels is always 1 (mono).
    int32_t in_channels = 1;
    // The hidden dimension for the codebook.
    int32_t hidden_dim = 128;
    // The number of filters for the first convolution.
    int32_t n_filters = 32;
    // The filter size for upsampling and downsampling.
    int32_t ratios[4] = {8, 5, 4, 2};
    // The kernel size for the first convolution.
    int32_t kernel_size = 7;
    // The kernel size for the residual blocks.
    int32_t residual_kernel_size = 3;
    // Compression
    int32_t compress = 2;
    // The number of layers in the LSTM modules.
    int32_t n_lstm_layers = 2;
    // The stride of the first convolution.
    int32_t stride = 1;

    // The dimension of the codebook.
    int32_t n_bins = 1024;
    // The sample rate of the model.
    int32_t sr = 24000;
    // The bandwidth.
    int32_t bandwidth = 24;

    // The number of codebooks.
    int32_t n_q = 32;
    // The product of the ratios.
    int32_t hop_length = 1;

    // File type of model weights (ggml_ftype); read from the model file.
    int32_t ftype;
};

// Model weights for the encoder, quantizer and decoder, together with the
// ggml context and backend that own the weight tensors.
struct encodec_model {
    encodec_hparams hparams;

    encodec_encoder encoder;
    encodec_quantizer quantizer;
    encodec_decoder decoder;

    // context holding the weight tensor metadata (created with no_alloc=true;
    // the actual data lives in a backend buffer)
    struct ggml_context *ctx;
    // presumably incremented as tensors are read from the weights file —
    // TODO confirm in the loader
    int n_loaded;

    // compute backend: CPU by default, CUDA/Metal when compiled in and requested
    ggml_backend_t backend = NULL;

    // backend buffer holding the weight data
    ggml_backend_buffer_t buffer_w;

    // name -> tensor lookup used when loading the weights file
    std::map<std::string, struct ggml_tensor *> tensors;
};

// Deleter for a heap-allocated ggml_cgraph: releases the node, leaf, hash-set
// and gradient arrays before the graph struct itself. free(NULL) is a no-op,
// so per-member null checks are unnecessary; the whole-pointer guard makes
// the deleter safe if ever invoked manually with a null graph.
struct encodec_ggml_cgraph_deleter {
    void operator()(struct ggml_cgraph * cgraph) {
        if (!cgraph)
            return;
        free(cgraph->nodes);
        free(cgraph->leafs);
        free(cgraph->visited_hash_set.keys);
        free(cgraph->grads);
        free(cgraph);
    }
};

// Per-inference state: the model, the compute graph and its buffers, and the
// intermediate/output tensors of the last run.
struct encodec_context {
    encodec_model model;

    // computational graph stored on the heap to avoid stack overflows
    // the computational graph grows with the sequence length (because of the LSTM)
    // which requires a lot of nodes
    std::unique_ptr<struct ggml_cgraph, encodec_ggml_cgraph_deleter> gf;

    // buffer for model evaluation
    ggml_backend_buffer_t buf_compute;

    // tensor graph allocator
    ggml_gallocr_t allocr = NULL;

    // intermediate steps
    struct ggml_tensor *encoded = NULL;  // Encoded audio
    struct ggml_tensor *codes = NULL;    // Quantized representation of audio in codebook
    struct ggml_tensor *decoded = NULL;  // Reconstructed audio from codes

    // host-side copies of the last run's codes and reconstructed samples
    std::vector<int32_t> out_codes;
    std::vector<float> out_audio;

    // statistics
    encodec_statistics stats;
};

bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int n_gpu_layers) {
    // verify magic (i.e. ggml signature in hex format)
    {
        uint32_t magic;
        read_safe(infile, magic);
        if (magic != ENCODEC_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid model file (bad magic)\n", __func__);
            return false;
        }
    }

    // load hparams
    {
        auto &hparams = model.hparams;

        read_safe(infile, hparams.in_channels);
        read_safe(infile, hparams.hidden_dim);
        read_safe(infile, hparams.n_filters);
        read_safe(infile, hparams.kernel_size);
        read_safe(infile, hparams.residual_kernel_size);
        // read_safe(infile, hparams.ratios);
        read_safe(infile, hparams.n_bins);
        read_safe(infile, hparams.bandwidth);
        read_safe(infile, hparams.sr);
        read_safe(infile, hparams.ftype);

        const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;

        // printf("%s: in_channels = %d\n", __func__, hparams.in_channels);
        // printf("%s: hidden_dim  = %d\n", __func__, hparams.hidden_dim);
        // printf("%s: n_filters   = %d\n", __func__, hparams.n_filters);
        // printf("%s: kernel_size = %d\n", __func__, hparams.kernel_size);
        // printf("%s: res_kernel  = %d\n", __func__, hparams.residual_kernel_size);
        // // printf("%s: ratios      = %d\n", __func__, hparams.ratios);
        // printf("%s: n_bins      = %d\n", __func__, hparams.n_bins);
        // printf("%s: bandwidth   = %d\n", __func__, hparams.bandwidth);
        // printf("%s: sample_rate = %d\n", __func__, hparams.sr);
        // printf("%s: ftype       = %d\n", __func__, hparams.ftype);
        // printf("%s: qntvr       = %d\n", __func__, qntvr);

        hparams.ftype %= GGML_QNT_VERSION_FACTOR;
    }

    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
    ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype));
    if (wtype == GGML_TYPE_COUNT) {
        fprintf(stderr, "%s: invalid model file (bad ftype value %d)\n",
                __func__, model.hparams.ftype);
        return 1;
    }

    auto &ctx = model.ctx;

    // create the ggml context
    {
        size_t n_tensors = ((4 * 2) * 4 + 2 + 4 * model.hparams.n_lstm_layers + 2) * 2;  // encoder and decoder
        n_tensors += model.hparams.n_q * 1;                                              // quantizer
        struct ggml_init_params params = {
            /* .mem_size   = */ ggml_tensor_overhead() * n_tensors,
            /* .mem_buffer = */ NULL,
            /* .no_alloc   = */ true,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
            return false;
        }
    }

#ifdef GGML_USE_CUBLAS
    if (n_gpu_layers > 0) {
        fprintf(stderr, "%s: using CUDA backend\n", __func__);
        model.backend = ggml_backend_cuda_init();
        if (!model.backend) {
            fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
        }
    }
#endif

#ifdef GGML_USE_METAL
    if (n_gpu_layers > 0) {
        fprintf(stderr, "%s: using Metal backend\n", __func__);
        model.backend = ggml_backend_metal_init();
        if (!model.backend) {
            fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
        }
    }
#endif

    if (!model.backend) {
        // fallback to CPU backend
        fprintf(stderr, "%s: using CPU backend\n", __func__);
        model.backend = ggml_backend_cpu_init();
    }

    if (!model.backend) {
        fprintf(stderr, "%s: ggml_backend_cpu_init() failed\n", __func__);
        return false;
    }

    // create the tensors for the model
    {
        const auto & hparams = model.hparams;

        const int in_channels   = hparams.in_channels;
        const int hidden_dim    = hparams.hidden_dim;
        const int n_filters     = hparams.n_filters;
        const int kernel_size   = hparams.kernel_size;
        const int res_kernel_sz = hparams.residual_kernel_size;
        const int n_q           = hparams.n_q;
        const int *ratios       = hparams.ratios;
        const int n_bins        = hparams.n_bins;

        // encoder
        {
            model.encoder.blocks.resize(4);

            int mult = 1;  // scaling factor for hidden size

            model.encoder.init_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, in_channels, mult * n_filters);
            model.encoder.init_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters);

            model.tensors["encoder.model.0.conv.conv.weight"] = model.encoder.init_conv_w;
            model.tensors["encoder.model.0.conv.conv.bias"] = model.encoder.init_conv_b;

            for (int i = 0; i < 4; i++) {
                // conv1
                model.encoder.blocks[i].conv_1_w = ggml_new_tensor_3d(ctx, wtype, res_kernel_sz, mult * n_filters, mult * n_filters / 2);
                model.encoder.blocks[i].conv_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2);

                model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.1.conv.conv.weight"] = model.encoder.blocks[i].conv_1_w;
                model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.1.conv.conv.bias"] = model.encoder.blocks[i].conv_1_b;

                // conv2
                model.encoder.blocks[i].conv_2_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 2, mult * n_filters);
                model.encoder.blocks[i].conv_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters);

                model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.3.conv.conv.weight"] = model.encoder.blocks[i].conv_2_w;
                model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.3.conv.conv.bias"] = model.encoder.blocks[i].conv_2_b;

                // shortcut conv
                model.encoder.blocks[i].conv_sc_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters, mult * n_filters);
                model.encoder.blocks[i].conv_sc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters);

                model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".shortcut.conv.conv.weight"] = model.encoder.blocks[i].conv_sc_w;
                model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".shortcut.conv.conv.bias"] = model.encoder.blocks[i].conv_sc_b;

                // downsampling
                model.encoder.blocks[i].ds_conv_w = ggml_new_tensor_3d(ctx, wtype, 2 * ratios[3 - i], mult * n_filters, mult * n_filters * 2);
                model.encoder.blocks[i].ds_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters * 2);

                model.tensors["encoder.model." + std::to_string(3 * (i + 1)) + ".conv.conv.weight"] = model.encoder.blocks[i].ds_conv_w;
                model.tensors["encoder.model." + std::to_string(3 * (i + 1)) + ".conv.conv.bias"] = model.encoder.blocks[i].ds_conv_b;

                mult *= 2;
            }

            // LSTM
            model.encoder.lstm.l0_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);
            model.encoder.lstm.l1_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);

            model.tensors["encoder.model.13.lstm.weight_ih_l0"] = model.encoder.lstm.l0_ih_w;
            model.tensors["encoder.model.13.lstm.weight_ih_l1"] = model.encoder.lstm.l1_ih_w;

            model.encoder.lstm.l0_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);
            model.encoder.lstm.l1_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);

            model.tensors["encoder.model.13.lstm.weight_hh_l0"] = model.encoder.lstm.l0_hh_w;
            model.tensors["encoder.model.13.lstm.weight_hh_l1"] = model.encoder.lstm.l1_hh_w;

            model.encoder.lstm.l0_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);
            model.encoder.lstm.l1_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);

            model.tensors["encoder.model.13.lstm.bias_ih_l0"] = model.encoder.lstm.l0_ih_b;
            model.tensors["encoder.model.13.lstm.bias_ih_l1"] = model.encoder.lstm.l1_ih_b;

            model.encoder.lstm.l0_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);
            model.encoder.lstm.l1_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);

            model.tensors["encoder.model.13.lstm.bias_hh_l0"] = model.encoder.lstm.l0_hh_b;
            model.tensors["encoder.model.13.lstm.bias_hh_l1"] = model.encoder.lstm.l1_hh_b;

            // final conv
            model.encoder.final_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, mult * n_filters, hidden_dim);
            model.encoder.final_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_dim);

            model.tensors["encoder.model.15.conv.conv.weight"] = model.encoder.final_conv_w;
            model.tensors["encoder.model.15.conv.conv.bias"] = model.encoder.final_conv_b;
        }

        // decoder
        {
            model.decoder.blocks.resize(4);

            int mult = 16;  // 2**len(ratios)

            model.decoder.init_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, hidden_dim, mult * n_filters);
            model.decoder.init_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters);

            model.tensors["decoder.model.0.conv.conv.weight"] = model.decoder.init_conv_w;
            model.tensors["decoder.model.0.conv.conv.bias"] = model.decoder.init_conv_b;

            // LSTM
            model.decoder.lstm.l0_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);
            model.decoder.lstm.l1_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);

            model.tensors["decoder.model.1.lstm.weight_ih_l0"] = model.decoder.lstm.l0_ih_w;
            model.tensors["decoder.model.1.lstm.weight_ih_l1"] = model.decoder.lstm.l1_ih_w;

            model.decoder.lstm.l0_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);
            model.decoder.lstm.l1_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters);

            model.tensors["decoder.model.1.lstm.weight_hh_l0"] = model.decoder.lstm.l0_hh_w;
            model.tensors["decoder.model.1.lstm.weight_hh_l1"] = model.decoder.lstm.l1_hh_w;

            model.decoder.lstm.l0_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);
            model.decoder.lstm.l1_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);

            model.tensors["decoder.model.1.lstm.bias_ih_l0"] = model.decoder.lstm.l0_ih_b;
            model.tensors["decoder.model.1.lstm.bias_ih_l1"] = model.decoder.lstm.l1_ih_b;

            model.decoder.lstm.l0_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);
            model.decoder.lstm.l1_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters);

            model.tensors["decoder.model.1.lstm.bias_hh_l0"] = model.decoder.lstm.l0_hh_b;
            model.tensors["decoder.model.1.lstm.bias_hh_l1"] = model.decoder.lstm.l1_hh_b;

            for (int i = 0; i < 4; i++) {
                // upsampling
                model.decoder.blocks[i].us_conv_w = ggml_new_tensor_3d(ctx, wtype, ratios[i] * 2, mult * n_filters / 2, mult * n_filters);
                model.decoder.blocks[i].us_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2);

                model.tensors["decoder.model." + std::to_string(3 * (i + 1)) + ".convtr.convtr.weight"] = model.decoder.blocks[i].us_conv_w;
                model.tensors["decoder.model." + std::to_string(3 * (i + 1)) + ".convtr.convtr.bias"] = model.decoder.blocks[i].us_conv_b;

                // conv1
                model.decoder.blocks[i].conv_1_w = ggml_new_tensor_3d(ctx, wtype, res_kernel_sz, mult * n_filters / 2, mult * n_filters / 4);
                model.decoder.blocks[i].conv_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 4);

                model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.1.conv.conv.weight"] = model.decoder.blocks[i].conv_1_w;
                model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.1.conv.conv.bias"] = model.decoder.blocks[i].conv_1_b;

                // conv2
                model.decoder.blocks[i].conv_2_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 4, mult * n_filters / 2);
                model.decoder.blocks[i].conv_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2);

                model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.3.conv.conv.weight"] = model.decoder.blocks[i].conv_2_w;
                model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.3.conv.conv.bias"] = model.decoder.blocks[i].conv_2_b;

                // shortcut
                model.decoder.blocks[i].conv_sc_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 2, mult * n_filters / 2);
                model.decoder.blocks[i].conv_sc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2);

                model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".shortcut.conv.conv.weight"] = model.decoder.blocks[i].conv_sc_w;
                model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".shortcut.conv.conv.bias"] = model.decoder.blocks[i].conv_sc_b;

                mult /= 2;
            }

            model.decoder.final_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, n_filters, in_channels);
            model.decoder.final_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, in_channels);

            model.tensors["decoder.model.15.conv.conv.weight"] = model.decoder.final_conv_w;
            model.tensors["decoder.model.15.conv.conv.bias"] = model.decoder.final_conv_b;
        }

        // quantizer
        {
            model.quantizer.blocks.resize(n_q);

            for (int i = 0; i < n_q; i++) {
                model.quantizer.blocks[i].embed = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hidden_dim, n_bins);

                model.tensors["quantizer.vq.layers." + std::to_string(i) + "._codebook.embed"] = model.quantizer.blocks[i].embed;
            }
        }
    }

    // allocate the model tensors in a backend buffer
    model.buffer_w = ggml_backend_alloc_ctx_tensors(ctx, model.backend);

    // load weights
    {
        size_t total_size = 0;
        model.n_loaded = 0;

        std::vector<char> read_buf;

        while (true) {
            int32_t n_dims;
            int32_t length;
            int32_t ftype;

            read_safe(infile, n_dims);
            read_safe(infile, length);
            read_safe(infile, ftype);

            if (infile.eof()) {
                break;
            }

            int32_t nelements = 1;
            int32_t ne[3] = {1, 1, 1};
            for (int i = 0; i < n_dims; i++) {
                read_safe(infile, ne[i]);
                nelements *= ne[i];
            }

            std::string name;
            std::vector<char> buf(length);
            infile.read(&buf[0], buf.size());
            name.assign(&buf[0], buf.size());

            if (model.tensors.find(name.data()) == model.tensors.end()) {
                fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
                return false;
            }

            auto tensor = model.tensors[name.data()];
            ggml_set_name(tensor, name.c_str());
            if (ggml_nelements(tensor) != nelements) {
                fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                return false;
            }

            if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1] || tensor->ne[2] != ne[2]) {
                fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%lld, %lld, %lld], expected [%d, %d, %d]\n",
                        __func__, name.data(), tensor->ne[0], tensor->ne[1], tensor->ne[2], ne[0], ne[1], ne[2]);
                return false;
            }

            const size_t bpe = ggml_type_size(ggml_type(ftype));

            if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                        __func__, name.data(), ggml_nbytes(tensor), nelements * bpe);
                return false;
            }

            if (ggml_backend_buffer_is_host(model.buffer_w)) {
                // for some backends such as CPU and Metal, the tensor data is in system memory and we can read directly into it
                infile.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
            } else {
                // read into a temporary buffer first, then copy to device memory
                read_buf.resize(ggml_nbytes(tensor));
                infile.read(read_buf.data(), ggml_nbytes(tensor));
                ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor));
            }

            total_size += ggml_nbytes(tensor);
            model.n_loaded++;
        }

        printf("%s: model size = %.2f MB\n", __func__, total_size / 1024.0 / 1024.0);
    }

    infile.close();

    return true;
}

// Create a new ggml_cgraph with the given size (usually ENCODEC_MAX_NODES). We need a
// custom function since the graph is so large, it overpasses the max built-in ggml
// default size.
static struct ggml_cgraph * encodec_ggml_cgraph_create(size_t size) {
    // zero-initialized so that all unused fields/pointers start out NULL
    struct ggml_cgraph * cgraph = (struct ggml_cgraph *)calloc(1, sizeof(struct ggml_cgraph));
    cgraph->size = size;
    cgraph->n_nodes = 0;
    cgraph->n_leafs = 0;
    cgraph->nodes = (struct ggml_tensor **)calloc(1, size * sizeof(struct ggml_tensor *));
    cgraph->leafs = (struct ggml_tensor **)calloc(1, size * sizeof(struct ggml_tensor *));

    // next primes after powers of two, used as hash-table sizes
    static const size_t primes[] = {
        2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
        2053, 4099, 8209, 16411, 32771, 65537, 131101,
        262147, 524309, 1048583, 2097169, 4194319, 8388617,
        16777259, 33554467, 67108879, 134217757, 268435459,
        536870923, 1073741827, 2147483659
    };
    static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);

    // binary search for the smallest prime that is >= 2 * size
    // (note: the table is sized against 2 * size, not size, so the visited
    // hash set has head-room over the maximum node count)
    size_t l = 0;
    size_t r = n_primes;
    while (l < r) {
        size_t m = (l + r)/2;
        if (primes[m] < size * 2) {
            l = m + 1;
        } else {
            r = m;
        }
    }
    // fall back to an odd non-prime if 2 * size exceeds the largest table entry
    size_t hash_size = l < n_primes ? primes[l] : (size * 2 + 1);

    // the visited hash set is used by ggml to deduplicate tensors while
    // expanding the graph; `used` is a bitset marking occupied key slots
    cgraph->visited_hash_set.size = hash_size;
    cgraph->visited_hash_set.keys = (struct ggml_tensor **)calloc(1, hash_size * sizeof(struct ggml_tensor *));
    cgraph->visited_hash_set.used = (ggml_bitset_t *)calloc(1, ggml_bitset_size(hash_size) * sizeof(ggml_bitset_t));
    cgraph->order = GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT;

    return cgraph;
}

// Build the ggml computation graph for the FULL (encode + quantize + decode)
// or ENCODE (encode + quantize) run modes, starting from raw audio.
//
//   ectx      - encodec context; on return ectx->gf owns the graph and
//               ectx->encoded / ectx->codes / ectx->decoded reference the
//               corresponding graph tensors
//   inp_audio - raw audio samples; not read here — the "inp" graph tensor is
//               filled later by the caller (see encodec_eval_internal)
//   n_samples - number of audio samples, used to size the "inp" tensor
//   mode      - FULL or ENCODE (asserted)
void encodec_build_graph(struct encodec_context *ectx,
                         const float * inp_audio,
                         const int n_samples,
                         const encodec_run_mode_t mode) {
    assert(mode == encodec_run_mode_t::FULL || mode == encodec_run_mode_t::ENCODE);

    const auto & model   = ectx->model;
    const auto & hparams = model.hparams;

    auto & gf = ectx->gf;

    const int *ratios       = hparams.ratios;
    const int kernel_size   = hparams.kernel_size;
    const int res_kernel_sz = hparams.residual_kernel_size;
    const int stride        = hparams.stride;
    const int n_bins        = hparams.n_bins;
    const int sr            = hparams.sr;
    const int bandwidth     = hparams.bandwidth;
    const int hop_length    = hparams.hop_length;
    const int hidden_dim    = hparams.hidden_dim;

    // since we are using ggml-alloc, this buffer only needs enough space to hold the
    // ggml_tensor and ggml_cgraph structs, but not the tensor data
    static size_t buf_size = ggml_tensor_overhead() * ENCODEC_MAX_NODES + ggml_graph_overhead();
    static std::vector<uint8_t> buf(buf_size);

    struct ggml_init_params ggml_params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf.data(),
        /*.no_alloc   =*/ true,  // the tensors will be allocated later by ggml_gallocr_alloc_graph()
    };

    struct ggml_context *ctx0 = ggml_init(ggml_params);

    gf = std::unique_ptr<struct ggml_cgraph, encodec_ggml_cgraph_deleter>(encodec_ggml_cgraph_create(ENCODEC_MAX_NODES));

    struct ggml_tensor *inp = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_samples);
    ggml_set_name(inp, "inp");
    ggml_set_input(inp);

    const struct encodec_encoder   * encoder   = &model.encoder;
    const struct encodec_quantizer * quantizer = &model.quantizer;
    const struct encodec_decoder   * decoder   = &model.decoder;

    // full pipeline is wired up even in ENCODE mode; only the tensors reachable
    // from the expanded output below end up in the compiled graph
    struct ggml_tensor * encoded = encodec_forward_encoder(
        encoder, ctx0, inp, ratios, kernel_size, res_kernel_sz, stride);

    struct ggml_tensor * codes = encodec_forward_quantizer_encode(
        quantizer, ctx0, encoded, n_bins, sr, bandwidth, hop_length);

    struct ggml_tensor * quantized = encodec_forward_quantizer_decode(
        quantizer, ctx0, codes, hidden_dim, n_bins, sr, bandwidth, hop_length);

    struct ggml_tensor * decoded = encodec_forward_decoder(
        decoder, ctx0, quantized, ratios, kernel_size, res_kernel_sz, stride);

    switch (mode) {
        case encodec_run_mode_t::FULL: {
            ggml_set_name(decoded, "decoded");
            ggml_set_output(decoded);
            ggml_build_forward_expand(gf.get(), decoded);
        } break;
        case encodec_run_mode_t::ENCODE: {
            ggml_set_name(codes, "codes");
            ggml_set_output(codes);
            ggml_build_forward_expand(gf.get(), codes);
        } break;
        case encodec_run_mode_t::DECODE: {
            // decode-only graphs are built by the codes overload
            assert(false);
        } break;
        default: {
            fprintf(stderr, "%s: unknown run mode\n", __func__);
        } break;
    }

    // ctx0 only holds tensor/graph metadata (no_alloc = true), safe to free here
    ggml_free(ctx0);

    ectx->encoded = encoded;
    ectx->codes   = codes;
    ectx->decoded = decoded;
}

// Build the ggml computation graph for DECODE mode, starting from quantizer
// codes rather than raw audio.
//
//   ectx    - encodec context; on return ectx->gf owns the graph,
//             ectx->codes points at the "inp_codes" input tensor and
//             ectx->decoded at the output tensor
//   codes   - code values; not read here — the "inp_codes" tensor is filled
//             later by the caller (see encodec_eval_internal)
//   n_codes - total number of codes; must be a multiple of the number of
//             codebooks n_q derived from the bandwidth
//   mode    - must be DECODE (asserted)
void encodec_build_graph(struct encodec_context *ectx, const int32_t *codes,
                         const int n_codes, const encodec_run_mode_t mode) {
    assert(mode == encodec_run_mode_t::DECODE);

    const auto & model   = ectx->model;
    const auto & hparams = model.hparams;

    auto & gf = ectx->gf;

    const int n_bins        = hparams.n_bins;
    const int sr            = hparams.sr;
    const int bandwidth     = hparams.bandwidth;
    const int hop_length    = hparams.hop_length;
    const int hidden_dim    = hparams.hidden_dim;
    const int * ratios      = hparams.ratios;
    const int kernel_size   = hparams.kernel_size;
    const int res_kernel_sz = hparams.residual_kernel_size;
    const int stride        = hparams.stride;

    // promote to float before dividing: with integer division the ceilf was a
    // no-op, under-counting the frame rate whenever sr % hop_length != 0
    const int frame_rate = (int)ceilf((float) sr / hop_length);
    const int n_q = get_num_quantizers_for_bandwidth(n_bins, frame_rate, bandwidth);

    if (n_codes % n_q != 0) {
        fprintf(stderr, "%s: invalid number of codes\n", __func__);
        assert(false);
    }

    // number of frames (codes per codebook)
    const int N = n_codes / n_q;

    // since we are using ggml-alloc, this buffer only needs enough space to hold
    // the ggml_tensor and ggml_cgraph structs, but not the tensor data
    static size_t buf_size = ggml_tensor_overhead() * ENCODEC_MAX_NODES + ggml_graph_overhead();
    static std::vector<uint8_t> buf(buf_size);

    struct ggml_init_params ggml_params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf.data(),
        /*.no_alloc   =*/ true,
    };

    struct ggml_context *ctx0 = ggml_init(ggml_params);

    gf = std::unique_ptr<struct ggml_cgraph, encodec_ggml_cgraph_deleter>(encodec_ggml_cgraph_create(ENCODEC_MAX_NODES));

    struct ggml_tensor *inp_codes = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, N, n_q);
    ggml_set_name(inp_codes, "inp_codes");
    ggml_set_input(inp_codes);

    const struct encodec_quantizer * quantizer = &model.quantizer;
    const struct encodec_decoder   * decoder   = &model.decoder;

    struct ggml_tensor *quantized = encodec_forward_quantizer_decode(
        quantizer, ctx0, inp_codes, hidden_dim, n_bins, sr, bandwidth, hop_length
    );

    struct ggml_tensor *decoded = encodec_forward_decoder(
        decoder, ctx0, quantized, ratios, kernel_size, res_kernel_sz, stride
    );

    switch (mode) {
        case encodec_run_mode_t::DECODE: {
            ggml_set_name(decoded, "decoded");
            ggml_set_output(decoded);
            ggml_build_forward_expand(gf.get(), decoded);
        } break;
        default: {
            fprintf(stderr, "%s: unknown run mode\n", __func__);
            assert(false);
        } break;
    }

    // ctx0 only holds tensor/graph metadata (no_alloc = true), safe to free here
    ggml_free(ctx0);

    ectx->codes   = inp_codes;
    ectx->decoded = decoded;
}

// Zero a tensor in the graph by name (used for LSTM hidden/cell state and
// quantizer accumulators). ggml_graph_get_tensor returns NULL for unknown
// names, so guard against it instead of dereferencing NULL in ggml_set_zero.
static void encodec_zero_tensor(struct ggml_cgraph *gf, const char *name) {
    struct ggml_tensor *tensor = ggml_graph_get_tensor(gf, name);
    if (!tensor) {
        fprintf(stderr, "%s: tensor '%s' not found in graph\n", __func__, name);
        return;
    }
    ggml_set_zero(tensor);
}

// Run the forward pass on raw audio for FULL or ENCODE mode: (re)build the
// graph for this input size, allocate its tensors from ectx->allocr, copy the
// samples into the "inp" tensor, zero the LSTM state accumulators, then
// compute on the model backend. Always returns true.
// NOTE: the zeroing must happen after ggml_gallocr_alloc_graph, once the
// tensors have backing memory.
bool encodec_eval_internal(struct encodec_context *ectx, const float * raw_audio,
                           const int n_samples, const int n_threads,
                           const encodec_run_mode_t mode) {
    assert(mode == encodec_run_mode_t::FULL || mode == encodec_run_mode_t::ENCODE);
    auto & model  = ectx->model;
    auto & allocr = ectx->allocr;
    auto & gf     = ectx->gf;

    // rebuild the graph for this n_samples; result is stored in ectx->gf
    encodec_build_graph(ectx, raw_audio, n_samples, mode);

    // allocate the graph tensors
    ggml_gallocr_alloc_graph(allocr, gf.get());

    // set the graph inputs
    struct ggml_tensor * inp = ggml_graph_get_tensor(gf.get(), "inp");
    ggml_backend_tensor_set(inp, raw_audio, 0, n_samples * ggml_element_size(inp));

    // make sure the encoder LSTM hidden/cell state accumulators are zeroed
    encodec_zero_tensor(gf.get(), "enc_l0_ht");
    encodec_zero_tensor(gf.get(), "enc_l1_ht");
    encodec_zero_tensor(gf.get(), "enc_l0_ct");
    encodec_zero_tensor(gf.get(), "enc_l1_ct");

    if (mode == encodec_run_mode_t::FULL) {
        // the decoder LSTM state and quantizer output only exist in FULL mode
        encodec_zero_tensor(gf.get(), "dec_l0_ht");
        encodec_zero_tensor(gf.get(), "dec_l1_ht");
        encodec_zero_tensor(gf.get(), "dec_l0_ct");
        encodec_zero_tensor(gf.get(), "dec_l1_ct");

        encodec_zero_tensor(gf.get(), "quantized_out");
    }

    // run the computation
    if (ggml_backend_is_cpu(model.backend)) {
        ggml_backend_cpu_set_n_threads(model.backend, n_threads);
    }

    ggml_backend_graph_compute(model.backend, gf.get());

    return true;
}

// Run the forward pass on quantizer codes for DECODE mode: (re)build the
// graph for this code count, allocate its tensors from ectx->allocr, copy the
// codes into the "inp_codes" tensor, zero the decoder LSTM state and the
// quantizer output accumulator, then compute on the model backend.
// Always returns true.
bool encodec_eval_internal(struct encodec_context *ectx, const int32_t *codes,
                           const int n_codes, const int n_threads,
                           const encodec_run_mode_t mode) {
    assert(mode == encodec_run_mode_t::DECODE);

    auto & model  = ectx->model;
    auto & allocr = ectx->allocr;
    auto & gf     = ectx->gf;

    // rebuild the graph for this n_codes; result is stored in ectx->gf
    encodec_build_graph(ectx, codes, n_codes, mode);

    // allocate the graph tensors
    ggml_gallocr_alloc_graph(allocr, gf.get());

    // set the graph inputs
    struct ggml_tensor * inp = ggml_graph_get_tensor(gf.get(), "inp_codes");
    ggml_backend_tensor_set(inp, codes, 0, n_codes * ggml_element_size(inp));

    // make sure accumulation tensors are zeroed (must happen after allocation)
    encodec_zero_tensor(gf.get(), "dec_l0_ht");
    encodec_zero_tensor(gf.get(), "dec_l1_ht");
    encodec_zero_tensor(gf.get(), "dec_l0_ct");
    encodec_zero_tensor(gf.get(), "dec_l1_ct");

    encodec_zero_tensor(gf.get(), "quantized_out");

    // run the computation
    if (ggml_backend_is_cpu(model.backend)) {
        ggml_backend_cpu_set_n_threads(model.backend, n_threads);
    }

    ggml_backend_graph_compute(model.backend, gf.get());

    return true;
}

bool encodec_eval(struct encodec_context *ectx, const float *raw_audio,
                  const int n_samples, const int n_threads,
                  const encodec_run_mode_t mode) {
    const int64_t t_start_us = ggml_time_us();

    // allocate the compute buffer
    {
        // create a graph allocator with the backend's default buffer type
        ectx->allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(ectx->model.backend));

        // create the graph for memory usage estimation
        encodec_build_graph(ectx, raw_audio, n_samples, mode);

        // pre-allocate the compute buffer
        ggml_gallocr_reserve(ectx->allocr, ectx->gf.get());
        size_t mem_size = ggml_gallocr_get_buffer_size(ectx->allocr, 0);
        fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0);
    }

    // encodec eval
    if (!encodec_eval_internal(ectx, raw_audio, n_samples, n_threads, mode)) {
        fprintf(stderr, "%s: failed to run encodec eval\n", __func__);
        return false;
    }

    ectx->stats.t_compute_us = ggml_time_us() - t_start_us;

    return true;
}

bool encodec_eval(struct encodec_context *ectx, const int32_t *codes,
                  const int n_codes, const int n_threads,
                  const encodec_run_mode_t mode) {
    const int64_t t_start_ms = ggml_time_us();

    // allocate the compute buffer
    {
        // create a graph allocator with the backend's default buffer type
        ectx->allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(ectx->model.backend));

        // create the graph for memory usage estimation
        encodec_build_graph(ectx, codes, n_codes, mode);

        // pre-allocate the compute buffer
        ggml_gallocr_reserve(ectx->allocr, ectx->gf.get());
        size_t mem_size = ggml_gallocr_get_buffer_size(ectx->allocr, 0);
        fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0);
    }

    // encodec eval
    if (!encodec_eval_internal(ectx, codes, n_codes, n_threads, mode)) {
        fprintf(stderr, "%s: failed to run encodec eval\n", __func__);
        return false;
    }

    ectx->stats.t_compute_us = ggml_time_us() - t_start_ms;

    return true;
}

// Encode and immediately decode `raw_audio`, storing the reconstructed
// samples in ectx->out_audio. Returns false on null input or eval failure.
bool encodec_reconstruct_audio(struct encodec_context *ectx, const float *raw_audio,
                                const int n_samples, const int n_threads) {
    if (raw_audio == nullptr) {
        fprintf(stderr, "%s: null input audio\n", __func__);
        return false;
    }

    // run the full encode + decode pipeline
    const bool ok = encodec_eval(ectx, raw_audio, n_samples, n_threads, encodec_run_mode_t::FULL);
    if (!ok) {
        fprintf(stderr, "%s: failed to run encodec eval\n", __func__);
        return false;
    }

    struct ggml_tensor *decoded = ectx->decoded;
    if (decoded == nullptr) {
        fprintf(stderr, "%s: null decoded tensor\n", __func__);
        return false;
    }

    // copy the decoded samples out of the backend buffer
    const int n_out = decoded->ne[0];
    ectx->out_audio.resize(n_out);
    ggml_backend_tensor_get(decoded, ectx->out_audio.data(), 0, n_out * ggml_element_size(decoded));

    return true;
}

// Encode `raw_audio` into quantizer codes, stored in ectx->out_codes.
// Returns false on null input or eval failure.
// (Null-input guard added for consistency with encodec_reconstruct_audio.)
bool encodec_compress_audio(struct encodec_context *ectx, const float *raw_audio,
                             const int n_samples, const int n_threads) {
    if (raw_audio == nullptr) {
        fprintf(stderr, "%s: null input audio\n", __func__);
        return false;
    }

    if (!encodec_eval(ectx, raw_audio, n_samples, n_threads, encodec_run_mode_t::ENCODE)) {
        fprintf(stderr, "%s: failed to run encodec eval\n", __func__);
        return false;
    }

    if (!ectx->codes) {
        fprintf(stderr, "%s: null codes tensor\n", __func__);
        return false;
    }

    struct ggml_tensor *codes = ectx->codes;

    auto &out_codes = ectx->out_codes;

    // codes tensor is [n_frames, n_q]; flatten into a single vector
    int out_length = codes->ne[0] * codes->ne[1];
    out_codes.resize(out_length);

    ggml_backend_tensor_get(codes, out_codes.data(), 0, out_length * ggml_element_size(codes));

    return true;
}

// Decode quantizer `codes` back into audio samples, stored in ectx->out_audio.
// Returns false on null input or eval failure.
// (Null-input guard added for consistency with encodec_reconstruct_audio.)
bool encodec_decompress_audio(struct encodec_context *ectx, const int32_t *codes,
                              const int n_codes, const int n_threads) {
    if (codes == nullptr) {
        fprintf(stderr, "%s: null input codes\n", __func__);
        return false;
    }

    if (!encodec_eval(ectx, codes, n_codes, n_threads, encodec_run_mode_t::DECODE)) {
        fprintf(stderr, "%s: failed to run encodec eval\n", __func__);
        return false;
    }

    if (!ectx->decoded) {
        fprintf(stderr, "%s: null decoded tensor\n", __func__);
        return false;
    }

    struct ggml_tensor *decoded = ectx->decoded;

    auto &out_audio = ectx->out_audio;

    // copy the decoded samples out of the backend buffer
    int out_length = decoded->ne[0];
    out_audio.resize(out_length);

    ggml_backend_tensor_get(decoded, out_audio.data(), 0, out_length * ggml_element_size(decoded));

    return true;
}

// The offset parameter is used to adapt to two scenarios:
// 1. If offset is 0, it is assumed the file only contains the Encodec weights, hence
//    the model is loaded from the beginning of the file.
// 2. If offset is gt 0, it is assumed the file contains the weights and then the Encodec
//    model, hence the model is loaded from the offset. This is the case for Bark.
// Note that we used to have an encodec_load_model taking a reference to a file stream
// but it was removed to comply with the C-header requirements.
// Load an Encodec model from `model_path`, starting `offset` bytes into the
// file (offset > 0 is used when the weights are embedded after another
// model's, e.g. Bark). Returns a heap-allocated context to be released with
// encodec_free(), or nullptr on failure.
struct encodec_context *encodec_load_model(const char* model_path, const int offset, int n_gpu_layers) {
    int64_t t_start_load_us = ggml_time_us();

    auto infile = std::ifstream(model_path, std::ios::binary);
    if (!infile) {
        fprintf(stderr, "%s: failed to open '%s'\n", __func__, model_path);
        return nullptr;
    }

    if (offset > 0) {
        infile.seekg(offset);
    }

    struct encodec_context *ectx = new encodec_context();

    ectx->model = encodec_model();
    if (!encodec_load_model_weights(infile, ectx->model, n_gpu_layers)) {
        fprintf(stderr, "%s: failed to load model weights from '%s'\n", __func__, model_path);
        // was leaked before: the context must be released on failure
        // NOTE(review): backend resources partially created inside
        // encodec_load_model_weights are not freed here — confirm it cleans
        // up after itself on failure
        delete ectx;
        return nullptr;
    }

    // pre-compute the hop length (product of the 4 upsampling ratios) and the
    // number of codebooks required for the configured bandwidth
    int bandwidth = ectx->model.hparams.bandwidth;
    int sr = ectx->model.hparams.sr;

    int hop_length = 1;

    for (int i = 0; i < 4; i++) {
        hop_length *= ectx->model.hparams.ratios[i];
    }

    ectx->model.hparams.hop_length = hop_length;
    ectx->model.hparams.n_q        = get_num_codebooks(bandwidth, hop_length, sr);

    ectx->stats.t_load_us          = ggml_time_us() - t_start_load_us;

    return ectx;
}

// Release all resources owned by the context: the model's ggml context, the
// compute buffer, the weight buffer, the backend, and the context itself.
// Safe to call with a null pointer.
void encodec_free(struct encodec_context *ectx) {
    if (ectx == nullptr) {
        return;
    }

    if (ectx->model.ctx != nullptr) {
        ggml_free(ectx->model.ctx);
    }

    if (ectx->buf_compute != nullptr) {
        ggml_backend_buffer_free(ectx->buf_compute);
    }

    ggml_backend_buffer_free(ectx->model.buffer_w);
    ggml_backend_free(ectx->model.backend);

    delete ectx;
}

// Set the target bandwidth (used to derive the number of codebooks).
// Null-context guard added for consistency with the getter functions.
void encodec_set_target_bandwidth(struct encodec_context *ectx, int bandwidth) {
    if (!ectx) {
        fprintf(stderr, "%s: null context\n", __func__);
        return;
    }
    ectx->model.hparams.bandwidth = bandwidth;
}

// Set the model sample rate.
// Null-context guard added for consistency with the getter functions.
void encodec_set_sample_rate(struct encodec_context *ectx, int sample_rate) {
    if (!ectx) {
        fprintf(stderr, "%s: null context\n", __func__);
        return;
    }
    ectx->model.hparams.sr = sample_rate;
}

// Return a read-only view of the load/compute timing statistics,
// or nullptr when no context is given.
const struct encodec_statistics* encodec_get_statistics(struct encodec_context *ectx) {
    if (ectx != nullptr) {
        return &ectx->stats;
    }
    fprintf(stderr, "%s: null context\n", __func__);
    return nullptr;
}

// Zero out the timing statistics of the context. No-op (with a warning)
// when the context is null.
void encodec_reset_statistics(struct encodec_context *ectx) {
    if (ectx == nullptr) {
        fprintf(stderr, "%s: null context\n", __func__);
        return;
    }
    // value-initialization zeroes both int64_t counters, same as the
    // previous memset
    ectx->stats = {};
}

// Return a pointer to the audio produced by the last decode/reconstruct call.
// The buffer is owned by the context. Returns nullptr on a null context.
float * encodec_get_audio(struct encodec_context *ectx) {
    if (ectx != nullptr) {
        return ectx->out_audio.data();
    }
    fprintf(stderr, "%s: null context\n", __func__);
    return nullptr;
}

// Return the number of samples in the context's output audio buffer,
// or 0 on a null context.
int encodec_get_audio_size(struct encodec_context *ectx) {
    if (ectx != nullptr) {
        return ectx->out_audio.size();
    }
    fprintf(stderr, "%s: null context\n", __func__);
    return 0;
}

// Return a pointer to the codes produced by the last encode/compress call.
// The buffer is owned by the context. Returns nullptr on a null context.
int32_t * encodec_get_codes(struct encodec_context *ectx) {
    if (ectx != nullptr) {
        return ectx->out_codes.data();
    }
    fprintf(stderr, "%s: null context\n", __func__);
    return nullptr;
}

// Return the number of codes in the context's output code buffer,
// or 0 on a null context.
int encodec_get_codes_size(struct encodec_context *ectx) {
    if (ectx != nullptr) {
        return ectx->out_codes.size();
    }
    fprintf(stderr, "%s: null context\n", __func__);
    return 0;
}


================================================
FILE: encodec.h
================================================
/*
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2024 Pierre-Antoine Bannier                                        │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
/*
 * This file contains the declarations of the structs and functions used in the encodec library.
 * The library provides functionality for audio compression and decompression using a custom model.
 * The model consists of an encoder, a quantizer and a decoder, each with their own set of parameters.
 * The library also provides functions for loading and freeing the model, as well as compressing and decompressing audio data.
 *
 */
#pragma once

#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif
    struct encodec_context;

    struct encodec_statistics {
        // The time taken to load the model.
        int64_t t_load_us;
        // The time taken to compute the model.
        int64_t t_compute_us;
    };

    /**
     * Loads an encodec model from the specified file path.
     *
     * @param model_path The file path to the encodec model.
     * @param offset The offset (in bytes) to the start of the model in the file.
     * @param n_gpu_layers The number of GPU layers to use.
     * @return A pointer to the encodec context struct.
     */
    struct encodec_context *encodec_load_model(
        const char *model_path,
        const int offset,
        int n_gpu_layers);

    /**
     * Sets the target bandwidth for the given encodec context.
     *
     * @param ectx The encodec context to set the target bandwidth for.
     * @param bandwidth The target bandwidth to set, in bits per second.
     */
    void encodec_set_target_bandwidth(
        struct encodec_context *ectx,
        int bandwidth);

    /**
     * Sets the sample rate for the given encodec context.
     *
     * @param ectx The encodec context to set the sample rate for.
     * @param sample_rate The sample rate to set.
     */
    void encodec_set_sample_rate(
        struct encodec_context *ectx,
        int sample_rate);

    /**
     * Reconstructs audio from raw audio data using the specified encodec context.
     *
     * @param ectx The encodec context to use for reconstruction.
     * @param raw_audio The raw audio data to reconstruct.
     * @param n_samples The number of samples in the raw audio buffer.
     * @param n_threads The number of threads to use for reconstruction.
     * @return True if the reconstruction was successful, false otherwise.
     */
    bool encodec_reconstruct_audio(
        struct encodec_context *ectx,
        const float *raw_audio,
        const int n_samples,
        int n_threads);

    /**
     * Compresses audio data using the specified encodec context.
     *
     * @param ectx The encodec context to use for compression.
     * @param raw_audio The raw audio data to compress.
     * @param n_samples The number of samples in the raw audio buffer.
     * @param n_threads The number of threads to use for compression.
     * @return True if the compression was successful, false otherwise.
     */
    bool encodec_compress_audio(
        struct encodec_context *ectx,
        const float *raw_audio,
        const int n_samples,
        int n_threads);

    /**
     * Decompresses audio data using the specified encodec context.
     *
     * @param ectx The encodec context to use for decompression.
     * @param codes The compressed audio data to decompress.
     * @param n_codes The number of codes in the codes buffer.
     * @param n_threads The number of threads to use for decompression.
     * @return True if the audio data was successfully decompressed, false otherwise.
     */
    bool encodec_decompress_audio(
        struct encodec_context *ectx,
        const int32_t *codes,
        const int n_codes,
        int n_threads);

    /**
     * Gets the audio data from the given encodec context.
     *
     * @param ectx The encodec context to get the audio data from.
     * @return A pointer to the audio data.
    */
    float * encodec_get_audio(
        struct encodec_context *ectx);

    /**
     * Gets the size of the audio data from the given encodec context.
     *
     * @param ectx The encodec context to get the audio size from.
     * @return The size of the audio data.
    */
    int encodec_get_audio_size(
        struct encodec_context *ectx);

    /**
     * Gets the code data from the given encodec context.
     *
     * @param ectx The encodec context to get the code data from.
     * @return A pointer to the code data.
    */
    int32_t * encodec_get_codes(
        struct encodec_context *ectx);

    /**
     * Gets the size of the code data from the given encodec context.
     *
     * @param ectx The encodec context to get the code size from.
     * @return The size of the code data.
    */
    int encodec_get_codes_size(
        struct encodec_context *ectx);

    /**
     * Gets the statistics for the given encodec context.
     *
     * @param ectx The encodec context to get the statistics for.
     * @return A pointer to the statistics struct.
    */
    const struct encodec_statistics* encodec_get_statistics(
        struct encodec_context *ectx);

    /**
     * Reset the statistics for the given encodec context.
     *
     * @param ectx The encodec context to reset the statistics for.
    */
   void encodec_reset_statistics(
        struct encodec_context *ectx);

    /**
     * @brief Frees the memory allocated for an encodec context.
     *
     * @param ectx The encodec context to free.
     */
    void encodec_free(
        struct encodec_context *ectx);

#ifdef __cplusplus
}
#endif

================================================
FILE: encoder.h
================================================
#pragma once

#include <vector>

#include "ggml.h"
#include "lstm.h"

// One encoder stage: a residual unit (conv1 -> conv2 plus a shortcut
// convolution) followed by a strided downsampling convolution at one of the
// model's ratios. All tensors are weights owned by the model context.
struct encodec_encoder_block {
    // conv1: first convolution of the residual branch
    struct ggml_tensor *conv_1_w;
    struct ggml_tensor *conv_1_b;

    // conv2: second convolution of the residual branch
    struct ggml_tensor *conv_2_w;
    struct ggml_tensor *conv_2_b;

    // shortcut convolution, added to the residual branch output
    struct ggml_tensor *conv_sc_w;
    struct ggml_tensor *conv_sc_b;

    // strided downsampling convolution applied after the residual sum
    struct ggml_tensor *ds_conv_w;
    struct ggml_tensor *ds_conv_b;
};

// Full encoder: an initial convolution, four res+downsample blocks, a
// two-layer LSTM, and a final projection convolution
// (see encodec_forward_encoder for the wiring).
struct encodec_encoder {
    // initial convolution applied to the raw audio
    struct ggml_tensor *init_conv_w;
    struct ggml_tensor *init_conv_b;

    // two-layer LSTM applied after the downsampling blocks
    encodec_lstm lstm;

    // final convolution projecting to the latent dimension
    struct ggml_tensor *final_conv_w;
    struct ggml_tensor *final_conv_b;

    // the four res + downsample stages
    std::vector<encodec_encoder_block> blocks;
};

struct ggml_tensor *encodec_forward_encoder(
    const struct encodec_encoder *encoder, struct ggml_context *ctx0,
    struct ggml_tensor *inp, const int * ratios, const int kernel_size, const int res_kernel_size,
    const int stride) {

    if (!inp) {
        fprintf(stderr, "%s: null input tensor\n", __func__);
        return NULL;
    }

    struct ggml_tensor *inpL = strided_conv_1d(
        ctx0, inp, encoder->init_conv_w, encoder->init_conv_b, stride);

    for (int layer_ix = 0; layer_ix < 4; layer_ix++) {
        encodec_encoder_block block = encoder->blocks[layer_ix];

        struct ggml_tensor *current = inpL;

        // shortcut
        struct ggml_tensor *shortcut = strided_conv_1d(
            ctx0, inpL, block.conv_sc_w, block.conv_sc_b, stride);

        // conv1
        current = ggml_elu(ctx0, current);

        current = strided_conv_1d(
            ctx0, current, block.conv_1_w, block.conv_1_b, stride);

        // conv2
        current = ggml_elu(ctx0, current);

        current = strided_conv_1d(
            ctx0, current, block.conv_2_w, block.conv_2_b, stride);

        // residual connection
        inpL = ggml_add(ctx0, current, shortcut);

        // downsampling layers
        inpL = ggml_elu(ctx0, inpL);

        inpL = strided_conv_1d(
            ctx0, inpL, block.ds_conv_w, block.ds_conv_b, ratios[3 - layer_ix]);
    }

    // lstm
    {
        struct ggml_tensor *cur = inpL;

        const encodec_lstm lstm = encoder->lstm;

        // first lstm layer
        char l0_prefix[7] = "enc_l0";
        struct ggml_tensor *hs1 = forward_pass_lstm_unilayer(
            ctx0, cur, lstm.l0_ih_w, lstm.l0_hh_w, lstm.l0_ih_b, lstm.l0_hh_b, l0_prefix);

        // second lstm layer
        char l1_prefix[7] = "enc_l1";
        struct ggml_tensor *out = forward_pass_lstm_unilayer(
            ctx0, hs1, lstm.l1_ih_w, lstm.l1_hh_w, lstm.l1_ih_b, lstm.l1_hh_b, l1_prefix);

        inpL = ggml_add(ctx0, inpL, out);
    }

    // final conv
    inpL = ggml_elu(ctx0, inpL);

    struct ggml_tensor *encoded_inp = strided_conv_1d(
        ctx0, inpL, encoder->final_conv_w, encoder->final_conv_b, stride);

    return encoded_inp;
}


================================================
FILE: examples/CMakeLists.txt
================================================
# Shared helper library for the example programs: argument parsing, WAV I/O
# and ECDC code (de)serialization. Examples link the `common` target.
add_library(common STATIC common.cpp)
target_include_directories(common PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_compile_features(common PRIVATE cxx_std_11)

# One subdirectory per example executable.
add_subdirectory(main)
add_subdirectory(compress)
add_subdirectory(decompress)


================================================
FILE: examples/README.md
================================================
# Examples

## main (Encoder/decoder end-to-end)

This example shows an end-to-end pipeline to generate codes from a raw WAV audio file
and use these codes to reconstruct the WAV audio file.

```bash
    ./main -t 8 \
    -i ~/Documents/encodec.cpp/build/bin/audio.wav \
    -m ~/Documents/encodec.cpp/ggml_weights/ggml-model.bin \
    -o output.wav
```

## Compress (encoder only)

This example shows how to generate a compressed representation of an audio file
with Encodec's encoder.

```bash
    ./compress -t 8 \
    -i ~/Documents/encodec.cpp/build/bin/audio.wav \
    -m ~/Documents/encodec.cpp/ggml_weights/ggml-model.bin \
    -o output.bin
```

## Decompress (decoder only)

This example shows how to reconstruct an audio file from a compressed
representation generated by Encodec's encoder.

```bash
    ./decompress -t 8 \
    -i ~/Documents/encodec.cpp/build/bin/output.bin \
    -m ~/Documents/encodec.cpp/ggml_weights/ggml-model.bin \
    -o output.wav
```


================================================
FILE: examples/common.cpp
================================================
#include <cmath>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

#define DR_WAV_IMPLEMENTATION
#include "dr_wav.h"
#include "json.hpp"

#include "common.h"

#define SAMPLE_RATE 24000
#define BITS_PER_CODEBOOK 10    // int(log2(quantizer.bins)); quantizer.bins = 1024

using json = nlohmann::json;

// The ECDC file format stores multi-byte integers in big-endian order.
// Reverses the byte order of a 32-bit integer.
uint32_t swap_endianness(uint32_t value) {
    uint32_t reversed = 0;
    for (int i = 0; i < 4; ++i) {
        reversed = (reversed << 8) | (value & 0xFF);
        value >>= 8;
    }
    return reversed;
}

// Detects whether the host stores integers in big-endian byte order by
// inspecting the first byte of a known 32-bit pattern.
bool is_big_endian(void) {
    const uint32_t probe = 0x01020304;
    const unsigned char *bytes = reinterpret_cast<const unsigned char *>(&probe);
    // On a big-endian host the most significant byte (0x01) comes first.
    return bytes[0] == 0x01;
}

// Prints CLI usage/help for the example programs to stderr, including the
// current defaults carried by params.
void encodec_print_usage(char ** argv, const encodec_params & params) {
    fprintf(stderr, "usage: %s [options]\n", argv[0]);
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help             show this help message and exit\n");
    fprintf(stderr, "  -t N, --threads N      number of threads to use during computation (default: %d)\n", params.n_threads);
    fprintf(stderr, "  -g N, --n-gpu-layers N number of GPU layers to use during computation (default: %d)\n", params.n_gpu_layers);
    fprintf(stderr, "  -m FNAME, --model FNAME\n");
    fprintf(stderr, "                         model path (default: %s)\n", params.model_path.c_str());
    fprintf(stderr, "  -i FNAME, --input FNAME\n");
    fprintf(stderr, "                         original audio wav (default: %s)\n", params.input_path.c_str());
    fprintf(stderr, "  -o FNAME, --outwav FNAME\n");
    fprintf(stderr, "                         output generated wav (default: %s)\n", params.output_path.c_str());
    fprintf(stderr, "\n");
}

// Parses command line arguments into params.
// Exits the process (after printing usage) on -h/--help, on an unknown
// argument, or when an option that requires a value is missing one.
// Returns 0 on success.
int encodec_params_parse(int argc, char ** argv, encodec_params & params) {
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];

        // Fetch the value following the current option. The previous code did
        // an unchecked argv[++i], reading past the end of argv when an option
        // was the last token on the command line.
        auto next_value = [&]() -> std::string {
            if (i + 1 >= argc) {
                fprintf(stderr, "error: missing value for argument: %s\n", arg.c_str());
                encodec_print_usage(argv, params);
                exit(0);
            }
            return argv[++i];
        };

        if (arg == "-t" || arg == "--threads") {
            params.n_threads = std::stoi(next_value());
        } else if (arg == "-g" || arg == "--n-gpu-layers") {
            params.n_gpu_layers = std::stoi(next_value());
        } else if (arg == "-m" || arg == "--model") {
            params.model_path = next_value();
        } else if (arg == "-o" || arg == "--outwav") {
            params.output_path = next_value();
        } else if (arg == "-i" || arg == "--input") {
            params.input_path = next_value();
        } else if (arg == "-h" || arg == "--help") {
            encodec_print_usage(argv, params);
            exit(0);
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            encodec_print_usage(argv, params);
            exit(0);
        }
    }

    return 0;
}

// Reads a WAV file from disk into a float buffer.
// in_path: path to the input WAV file.
// audio_arr: output vector, resized to the number of PCM frames read.
// Returns true on success, false if the file could not be read.
// NOTE(review): only total_frame_count floats are copied regardless of
// channel count -- callers appear to assume mono input; confirm.
bool read_wav_from_disk(std::string in_path, std::vector<float> & audio_arr) {
    uint32_t channels;
    uint32_t sample_rate;
    drwav_uint64 total_frame_count;

    float * raw_audio = drwav_open_file_and_read_pcm_frames_f32(
        in_path.c_str(), &channels, &sample_rate, &total_frame_count, NULL);

    if (raw_audio == NULL) {
        fprintf(stderr, "%s: could not read wav file\n", __func__);
        return false;
    }

    // %llu (was %lld): drwav_uint64 is unsigned, and the cast keeps the
    // argument type portable across platforms.
    fprintf(stderr, "\n%s: Number of frames read = %llu.\n", __func__,
            (unsigned long long) total_frame_count);

    audio_arr.resize(total_frame_count);
    memcpy(audio_arr.data(), raw_audio, total_frame_count * sizeof(float));

    drwav_free(raw_audio, NULL);

    return true;
}

// Writes a mono 32-bit float WAV file at SAMPLE_RATE to dest_path.
// audio_arr: interleaved (here: single-channel) float samples.
void write_wav_on_disk(std::vector<float> & audio_arr, std::string dest_path) {
    drwav_data_format format;
    format.bitsPerSample = 32;
    format.sampleRate = SAMPLE_RATE;
    format.container = drwav_container_riff;
    format.channels = 1;
    format.format = DR_WAVE_FORMAT_IEEE_FLOAT;

    drwav wav;
    // The init return value was previously ignored, making the write below
    // undefined when the destination could not be opened.
    if (!drwav_init_file_write(&wav, dest_path.c_str(), &format, NULL)) {
        fprintf(stderr, "%s: could not open '%s' for writing\n", __func__, dest_path.c_str());
        return;
    }
    drwav_uint64 frames = drwav_write_pcm_frames(&wav, audio_arr.size(), audio_arr.data());
    drwav_uninit(&wav);

    // %llu (was %lld): drwav_uint64 is unsigned.
    fprintf(stderr, "%s: Number of frames written = %llu.\n", __func__,
            (unsigned long long) frames);
}

// Packs a stream of fixed-width integer codes into a byte stream.
// Bits are accumulated LSB-first: the low bits of the first pushed value
// land in the first output byte (matching BitUnpacker below).
class BitPacker {
    public:
        // bits: width in bits of each pushed value (values must be < 2^bits;
        //       callers use BITS_PER_CODEBOOK = 10).
        // fo:   output stream the packed bytes are written to.
        BitPacker(int bits, std::ofstream& fo)
            : current_value(0), current_bits(0), bits(bits), fo(fo) {}

        // Push a new value into the stream, emitting full bytes as they
        // become available.
        void push(int value) {
            // unsigned shift: the previous signed accumulator could shift
            // into the sign bit (undefined behavior) for wider code widths.
            current_value += (static_cast<uint32_t>(value) << current_bits);
            current_bits += bits;
            while (current_bits >= 8) {
                uint8_t lower_8bits = current_value & 0xff;
                current_bits -= 8;
                current_value >>= 8;
                fo.write(reinterpret_cast<char*>(&lower_8bits), sizeof(lower_8bits));
            }
        }

        // Flush the remaining partial byte (if any) and the underlying stream.
        void flush() {
            if (current_bits) {
                // Write the low byte explicitly; the previous code wrote the
                // first byte of the whole accumulator, which is only correct
                // on little-endian hosts.
                uint8_t lower_8bits = current_value & 0xff;
                fo.write(reinterpret_cast<char*>(&lower_8bits), sizeof(lower_8bits));
                current_value = 0;
                current_bits = 0;
            }
            fo.flush();
        }

    private:
        uint32_t current_value;  // pending bits, LSB-aligned
        int current_bits;        // number of valid bits in current_value
        int bits;                // width of each pushed value
        std::ofstream & fo;      // destination stream
};

// Reads back fixed-width integer codes produced by BitPacker.
class BitUnpacker {
    public:
        // bits: width in bits of each code to extract.
        // fo:   input stream of packed bytes.
        BitUnpacker(int bits, std::ifstream& fo)
            : bits(bits), fo(fo), mask((1u << bits) - 1), current_value(0), current_bits(0) {}

        // Pull a single value from the stream.
        // Returns 0 once the stream is exhausted (the old comment claimed an
        // "empty optional", but the return type is int). A genuine 0 code is
        // indistinguishable from end-of-stream, so callers must know the
        // expected number of codes in advance (read_codes_from_file does).
        int pull() {
            while (current_bits < bits) {
                char buf;
                if (!fo.read(&buf, 1)) {
                    return 0;  // end of stream
                }
                uint8_t character = static_cast<uint8_t>(buf);
                // unsigned shift for symmetry with BitPacker (no sign-bit UB)
                current_value += static_cast<uint32_t>(character) << current_bits;
                current_bits += 8;
            }

            int out = current_value & mask;
            current_value >>= bits;
            current_bits -= bits;
            return out;
        }

    private:
        int bits;                // width of each code
        std::ifstream& fo;       // source stream
        uint32_t mask;           // (1 << bits) - 1, isolates one code
        uint32_t current_value;  // pending bits, LSB-aligned
        int current_bits;        // number of valid bits in current_value
};

// Writes the ECDC header: "ECDC" magic, a 1-byte version (0), the big-endian
// length of the JSON metadata blob, then the metadata itself.
// audio_length: original number of audio samples, stored under "al" so the
// decoder can trim padding after reconstruction.
void write_encodec_header(std::ofstream & fo, uint32_t audio_length) {
    // m = model name, al = audio length, nc = number of codebooks,
    // lm = language-model flag.
    // NOTE(review): nc is hardcoded to 16 while the reader takes whatever
    // "nc" it finds -- confirm this matches the bandwidth used for encoding.
    json metadata = {
        {"m" , "encodec_24khz"},
        {"al",    audio_length},
        {"nc",              16},
        {"lm",           false},
    };
    std::string meta_dumped = metadata.dump();

    std::string magic = "ECDC";
    uint8_t version = 0;

    uint32_t meta_length = static_cast<uint32_t>(meta_dumped.size());
    if (!is_big_endian()) {
        // if little endian, needs to swap to big-endian order for correct ECDC format.
        meta_length = swap_endianness(meta_length);
    }

    fo.write(magic.c_str(), magic.size());
    fo.write((char *) &version, sizeof(version));
    fo.write((char *) &meta_length, sizeof(uint32_t));

    fo.write(meta_dumped.data(), meta_dumped.size());

    fo.flush();
}

// Parses the ECDC header: 4-byte "ECDC" magic, a 1-byte version, a big-endian
// 4-byte metadata length, then a JSON metadata blob of that length.
// Throws std::runtime_error on truncated or malformed headers.
json read_ecdc_header(std::ifstream & fin) {
    char buf_magic[4];
    fin.read(buf_magic, sizeof(buf_magic));

    uint8_t version;
    fin.read((char *) &version, sizeof(version));

    uint32_t meta_length;
    fin.read((char *) &meta_length, sizeof(meta_length));

    // Stream state was previously unchecked: a short file would leave
    // version/meta_length uninitialized.
    if (!fin) {
        throw std::runtime_error("Failed to read ECDC header.");
    }

    // Validate magic and version *before* trusting meta_length, so a corrupt
    // or foreign file cannot trigger a huge allocation below.
    std::string magic(buf_magic, sizeof(buf_magic));
    if (magic != "ECDC") {
        throw std::runtime_error("File is not in ECDC format.");
    }

    if (version != 0) {
        throw std::runtime_error("Version not supported.");
    }

    // The length field is stored big-endian; swap on little-endian hosts.
    if (!is_big_endian()) {
        meta_length = swap_endianness(meta_length);
    }

    std::vector<char> buf_meta(meta_length);
    fin.read(buf_meta.data(), buf_meta.size());

    std::string meta_str(buf_meta.begin(), buf_meta.end());
    return json::parse(meta_str);
}

void write_encodec_codes(
                 std::ofstream & fo,
          std::vector<int32_t> & codes) {
    BitPacker bp(BITS_PER_CODEBOOK, fo);

    for (int32_t code : codes) {
        bp.push(code);
    }

    bp.flush();
}

// Writes an ECDC file (header + packed codes) to dest_path.
// audio_length: original sample count, recorded in the header.
// Returns true on success, false if the file could not be opened (the open
// was previously unchecked and the function always returned true).
bool write_codes_to_file(
                   std::string   dest_path,
          std::vector<int32_t> & codes,
                      uint32_t   audio_length) {
    std::ofstream fo(dest_path, std::ios::binary);
    if (!fo) {
        fprintf(stderr, "error: could not open file '%s' for writing\n", dest_path.c_str());
        return false;
    }

    write_encodec_header(fo, audio_length);
    write_encodec_codes(fo, codes);

    fo.close();

    return true;
}

// Reads an ECDC file: parses the header metadata, then unpacks
// frame_length * n_codebooks codes from the packed bit stream.
// audio_length / n_codebooks are output parameters taken from the header.
// Returns true on success. May propagate std::runtime_error from
// read_ecdc_header on malformed files (unchanged behavior).
bool read_codes_from_file(
                   std::string   code_path,
          std::vector<int32_t> & codes,
                      uint32_t & audio_length,
                      uint32_t & n_codebooks) {
    std::ifstream fin(code_path, std::ios::binary);
    if (!fin) {
        fprintf(stderr, "error: could not open file '%s'\n", code_path.c_str());
        return false;
    }

    json metadata = read_ecdc_header(fin);

    try {
        if (metadata.contains("al") && metadata["al"].is_number_unsigned()) {
            audio_length = metadata["al"];
        } else {
            fprintf(stderr, "error: metadata does not contain audio length\n");
            return false;
        }

        if (metadata.contains("nc") && metadata["nc"].is_number_unsigned()) {
            n_codebooks = metadata["nc"];
        } else {
            fprintf(stderr, "error: metadata does not contain number of codebooks\n");
            return false;
        }
    } catch (const json::exception & ex) {
        fprintf(stderr, "JSON Error: %s", ex.what());
        // Previously fell through here and used possibly-uninitialized
        // audio_length / n_codebooks; bail out instead.
        return false;
    }

    // TODO: remove hardcoded values
    const int hop_length = 320;  // 8 * 5 * 4 * 2
    const int frame_rate = std::ceil((float) SAMPLE_RATE / hop_length);
    const int frame_length = std::ceil((float) audio_length * frame_rate / SAMPLE_RATE);

    codes.resize(frame_length * n_codebooks);

    BitUnpacker bu(BITS_PER_CODEBOOK, fin);

    for (size_t i = 0; i < codes.size(); i++) {
        codes[i] = bu.pull();
    }

    fin.close();

    return true;
}


================================================
FILE: examples/common.h
================================================
#pragma once

#include <algorithm>
#include <cstdint>
#include <string>
#include <thread>
#include <vector>

/**
 * @brief Struct containing parameters for the encodec context.
 *
 */
struct encodec_params {
    // number of threads for inference (hardware concurrency, capped at 4)
    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());

    // weights location
    // NOTE(review): developer-machine default; override with -m in practice.
    std::string model_path = "/Users/pbannier/Documents/encodec.cpp/ggml_weights/ggml-model.bin";

    // input location (WAV for main/compress, ECDC codes for decompress)
    std::string input_path = "/Users/pbannier/Documents/encodec/decomp_24khz_True.wav";

    // output location
    std::string output_path = "output.wav";

    // number of GPU layers to use (0 = CPU only)
    int32_t n_gpu_layers = 0;
};

/**
 * @brief Parses command line arguments and sets the encodec parameters accordingly.
 *
 * @param argc Number of command line arguments.
 * @param argv Array of command line arguments.
 * @param params Struct containing encodec parameters.
 * @return int Returns 0 if successful, -1 otherwise.
 */
int encodec_params_parse(int argc, char ** argv, encodec_params & params);

/**
 * @brief Reads a WAV file from disk and stores the audio data in a vector of floats.
 *
 * @param in_path Path to the input WAV file.
 * @param audio_arr Vector to store the audio data.
 * @return true If the file was successfully read.
 * @return false If the file could not be read.
 */
bool read_wav_from_disk(std::string in_path, std::vector<float> & audio_arr);

/**
 * @brief Writes a vector of floats to a WAV file on disk.
 *
 * @param audio_arr Vector containing the audio data.
 * @param dest_path Path to the output WAV file.
 */
void write_wav_on_disk(std::vector<float>& audio_arr, std::string dest_path);

/**
 * @brief Writes a vector of integers to a file on disk.
 *
 * @param dest_path Path to the output file.
 * @param codes Vector containing the integers to write.
 * @param audio_length Original length of the audio.
 * @return true If the file was successfully written.
 * @return false If the file could not be written.
 */
bool write_codes_to_file(std::string dest_path, std::vector<int32_t> & codes, uint32_t audio_length);

/**
 * @brief Reads a vector of integers from a file on disk.
 *
 * @param code_path Path to the input file.
 * @param codes Vector to store the codes.
 * @param audio_length Original length of the audio.
 * @param n_codebooks Number of codebooks used to encode the audio.
 * @return true If the codes were successfully read, false otherwise.
 */
bool read_codes_from_file(
                   std::string   code_path,
          std::vector<int32_t> & codes,
                      uint32_t & audio_length,
                      uint32_t & n_codebooks);


================================================
FILE: examples/compress/CMakeLists.txt
================================================
# `compress` example: encoder-only pipeline (WAV in, ECDC codes out).
set(TARGET compress)
add_executable(${TARGET} main.cpp)
target_link_libraries(${TARGET} PRIVATE encodec common)
target_compile_features(${TARGET} PRIVATE cxx_std_11)

================================================
FILE: examples/compress/main.cpp
================================================
#include <cstring>
#include <memory>
#include <string>
#include <thread>

#include "encodec.h"
#include "common.h"


// compress example: load the model, read a WAV file, run the Encodec encoder
// and write the resulting codes to an ECDC file.
int main(int argc, char **argv) {
    ggml_time_init();
    const int64_t t_main_start_us = ggml_time_us();

    encodec_params params;

    // NOTE(review): encodec_params_parse returns 0 or exits, so this branch
    // appears unreachable -- kept as a guard.
    if (encodec_params_parse(argc, argv, params) > 0) {
        fprintf(stderr, "%s: Could not parse arguments\n", __func__);
        return 1;
    }

    // initialize encodec context
    struct encodec_context * ectx = encodec_load_model(params.model_path.c_str(), 0 /* offset */, params.n_gpu_layers);
    if (!ectx) {
        printf("%s: error during loading model\n", __func__);
        return 1;
    }

    // target bandwidth (kbps); determines how many codebooks are used
    encodec_set_target_bandwidth(ectx, 12);

    // read audio from disk
    std::vector<float> original_audio_arr;
    if(!read_wav_from_disk(params.input_path, original_audio_arr)) {
        printf("%s: error during reading wav file\n", __func__);
        return 1;
    }

    // compress audio
    float * audio_data = original_audio_arr.data();
    int n_samples = original_audio_arr.size();
    if (!encodec_compress_audio(ectx, audio_data, n_samples, params.n_threads)) {
        printf("%s: error during compression \n", __func__);
        return 1;
    }

    // fetch the compressed codes and write them to disk; the original sample
    // count goes into the header so the decoder can trim padding
    int32_t * codes_data = encodec_get_codes(ectx);
    int n_codes = encodec_get_codes_size(ectx);
    std::vector<int32_t> codes_arr(codes_data, codes_data + n_codes);

    if (!write_codes_to_file(params.output_path, codes_arr, original_audio_arr.size())) {
        printf("%s: error during writing codes to file\n", __func__);
        return 1;
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        const encodec_statistics * stats = encodec_get_statistics(ectx);

        printf("\n\n");
        printf("%s:     load time = %8.2f ms\n", __func__, stats->t_load_us/1000.0f);
        printf("%s:     eval time = %8.2f ms\n", __func__, stats->t_compute_us/1000.0f);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    encodec_free(ectx);

    return 0;
}

================================================
FILE: examples/decompress/CMakeLists.txt
================================================
# `decompress` example: decoder-only pipeline (ECDC codes in, WAV out).
set(TARGET decompress)
add_executable(${TARGET} main.cpp)
target_link_libraries(${TARGET} PRIVATE encodec common)
target_compile_features(${TARGET} PRIVATE cxx_std_11)

================================================
FILE: examples/decompress/main.cpp
================================================
#include <cstring>
#include <memory>
#include <string>
#include <thread>

#include "encodec.h"
#include "common.h"


// decompress example: load the model, read ECDC codes from disk, run the
// Encodec decoder and write the reconstructed waveform as a WAV file.
int main(int argc, char **argv) {
    ggml_time_init();
    const int64_t t_main_start_us = ggml_time_us();

    encodec_params params;

    // NOTE(review): encodec_params_parse returns 0 or exits, so this branch
    // appears unreachable -- kept as a guard.
    if (encodec_params_parse(argc, argv, params) > 0) {
        fprintf(stderr, "%s: Could not parse arguments\n", __func__);
        return 1;
    }

    // initialize encodec context
    struct encodec_context * ectx = encodec_load_model(params.model_path.c_str(), 0 /* offset */, params.n_gpu_layers);
    if (!ectx) {
        printf("%s: error during loading model\n", __func__);
        return 1;
    }

    // NOTE(review): must match the bandwidth used at compression time, or the
    // codebook count will disagree with the file contents -- confirm.
    encodec_set_target_bandwidth(ectx, 12);

    // read compressed audio from disk
    std::vector<int32_t> codes;
    uint32_t audio_length, n_codebooks;
    if (!read_codes_from_file(params.input_path, codes, audio_length, n_codebooks)) {
        printf("%s: error during reading codes\n", __func__);
        return 1;
    }

    // decompress audio
    if (!encodec_decompress_audio(ectx, codes.data(), codes.size(), params.n_threads)) {
        printf("%s: error during decompression\n", __func__);
        return 1;
    }

    // write reconstructed audio on disk
    const float * audio_data = encodec_get_audio(ectx);
    const int audio_size = encodec_get_audio_size(ectx);
    std::vector<float> audio_arr(audio_data, audio_data + audio_size);
    audio_arr.resize(audio_length);  // trim decoder padding back to the original length

    write_wav_on_disk(audio_arr, params.output_path);

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        const encodec_statistics * stats = encodec_get_statistics(ectx);

        printf("\n\n");
        printf("%s:     load time = %8.2f ms\n", __func__, stats->t_load_us/1000.0f);
        printf("%s:     eval time = %8.2f ms\n", __func__, stats->t_compute_us/1000.0f);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    encodec_free(ectx);

    return 0;
}


================================================
FILE: examples/dr_wav.h
================================================
/*
WAV audio loader and writer. Choice of public domain or MIT-0. See license statements at the end of this file.
dr_wav - v0.12.16 - 2020-12-02

David Reid - mackron@gmail.com

GitHub: https://github.com/mackron/dr_libs
*/

/*
RELEASE NOTES - VERSION 0.12
============================
Version 0.12 includes breaking changes to custom chunk handling.


Changes to Chunk Callback
-------------------------
dr_wav supports the ability to fire a callback when a chunk is encounted (except for WAVE and FMT chunks). The callback has been updated to include both the
container (RIFF or Wave64) and the FMT chunk which contains information about the format of the data in the wave file.

Previously, there was no direct way to determine the container, and therefore no way to discriminate against the different IDs in the chunk header (RIFF and
Wave64 containers encode chunk ID's differently). The `container` parameter can be used to know which ID to use.

Sometimes it can be useful to know the data format at the time the chunk callback is fired. A pointer to a `drwav_fmt` object is now passed into the chunk
callback which will give you information about the data format. To determine the sample format, use `drwav_fmt_get_format()`. This will return one of the
`DR_WAVE_FORMAT_*` tokens.
*/

/*
Introduction
============
This is a single file library. To use it, do something like the following in one .c file.
    
    ```c
    #define DR_WAV_IMPLEMENTATION
    #include "dr_wav.h"
    ```

You can then #include this file in other parts of the program as you would with any other header file. Do something like the following to read audio data:

    ```c
    drwav wav;
    if (!drwav_init_file(&wav, "my_song.wav", NULL)) {
        // Error opening WAV file.
    }

    drwav_int32* pDecodedInterleavedPCMFrames = malloc(wav.totalPCMFrameCount * wav.channels * sizeof(drwav_int32));
    size_t numberOfSamplesActuallyDecoded = drwav_read_pcm_frames_s32(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames);

    ...

    drwav_uninit(&wav);
    ```

If you just want to quickly open and read the audio data in a single operation you can do something like this:

    ```c
    unsigned int channels;
    unsigned int sampleRate;
    drwav_uint64 totalPCMFrameCount;
    float* pSampleData = drwav_open_file_and_read_pcm_frames_f32("my_song.wav", &channels, &sampleRate, &totalPCMFrameCount, NULL);
    if (pSampleData == NULL) {
        // Error opening and reading WAV file.
    }

    ...

    drwav_free(pSampleData);
    ```

The examples above use versions of the API that convert the audio data to a consistent format (32-bit signed PCM, in this case), but you can still output the
audio data in its internal format (see notes below for supported formats):

    ```c
    size_t framesRead = drwav_read_pcm_frames(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames);
    ```

You can also read the raw bytes of audio data, which could be useful if dr_wav does not have native support for a particular data format:

    ```c
    size_t bytesRead = drwav_read_raw(&wav, bytesToRead, pRawDataBuffer);
    ```

dr_wav can also be used to output WAV files. This does not currently support compressed formats. To use this, look at `drwav_init_write()`,
`drwav_init_file_write()`, etc. Use `drwav_write_pcm_frames()` to write samples, or `drwav_write_raw()` to write raw data in the "data" chunk.

    ```c
    drwav_data_format format;
    format.container = drwav_container_riff;     // <-- drwav_container_riff = normal WAV files, drwav_container_w64 = Sony Wave64.
    format.format = DR_WAVE_FORMAT_PCM;          // <-- Any of the DR_WAVE_FORMAT_* codes.
    format.channels = 2;
    format.sampleRate = 44100;
    format.bitsPerSample = 16;
    drwav_init_file_write(&wav, "data/recording.wav", &format, NULL);

    ...

    drwav_uint64 framesWritten = drwav_write_pcm_frames(pWav, frameCount, pSamples);
    ```

dr_wav has seamless support the Sony Wave64 format. The decoder will automatically detect it and it should Just Work without any manual intervention.


Build Options
=============
#define these options before including this file.

#define DR_WAV_NO_CONVERSION_API
  Disables conversion APIs such as `drwav_read_pcm_frames_f32()` and `drwav_s16_to_f32()`.

#define DR_WAV_NO_STDIO
  Disables APIs that initialize a decoder from a file such as `drwav_init_file()`, `drwav_init_file_write()`, etc.



Notes
=====
- Samples are always interleaved.
- The default read function does not do any data conversion. Use `drwav_read_pcm_frames_f32()`, `drwav_read_pcm_frames_s32()` and `drwav_read_pcm_frames_s16()`
  to read and convert audio data to 32-bit floating point, signed 32-bit integer and signed 16-bit integer samples respectively. Tested and supported internal
  formats include the following:
  - Unsigned 8-bit PCM
  - Signed 12-bit PCM
  - Signed 16-bit PCM
  - Signed 24-bit PCM
  - Signed 32-bit PCM
  - IEEE 32-bit floating point
  - IEEE 64-bit floating point
  - A-law and u-law
  - Microsoft ADPCM
  - IMA ADPCM (DVI, format code 0x11)
- dr_wav will try to read the WAV file as best it can, even if it's not strictly conformant to the WAV format.
*/

#ifndef dr_wav_h
#define dr_wav_h

#ifdef __cplusplus
extern "C" {
#endif

#define DRWAV_STRINGIFY(x)      #x
#define DRWAV_XSTRINGIFY(x)     DRWAV_STRINGIFY(x)

#define DRWAV_VERSION_MAJOR     0
#define DRWAV_VERSION_MINOR     12
#define DRWAV_VERSION_REVISION  16
#define DRWAV_VERSION_STRING    DRWAV_XSTRINGIFY(DRWAV_VERSION_MAJOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_MINOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_REVISION)

#include <stddef.h> /* For size_t. */

/* Sized types. */
typedef   signed char           drwav_int8;
typedef unsigned char           drwav_uint8;
typedef   signed short          drwav_int16;
typedef unsigned short          drwav_uint16;
typedef   signed int            drwav_int32;
typedef unsigned int            drwav_uint32;
#if defined(_MSC_VER)
    typedef   signed __int64    drwav_int64;
    typedef unsigned __int64    drwav_uint64;
#else
    #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wlong-long"
        #if defined(__clang__)
            #pragma GCC diagnostic ignored "-Wc++11-long-long"
        #endif
    #endif
    typedef   signed long long  drwav_int64;
    typedef unsigned long long  drwav_uint64;
    #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
        #pragma GCC diagnostic pop
    #endif
#endif
#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
    typedef drwav_uint64        drwav_uintptr;
#else
    typedef drwav_uint32        drwav_uintptr;
#endif
typedef drwav_uint8             drwav_bool8;
typedef drwav_uint32            drwav_bool32;
#define DRWAV_TRUE              1
#define DRWAV_FALSE             0

#if !defined(DRWAV_API)
    #if defined(DRWAV_DLL)
        #if defined(_WIN32)
            #define DRWAV_DLL_IMPORT  __declspec(dllimport)
            #define DRWAV_DLL_EXPORT  __declspec(dllexport)
            #define DRWAV_DLL_PRIVATE static
        #else
            #if defined(__GNUC__) && __GNUC__ >= 4
                #define DRWAV_DLL_IMPORT  __attribute__((visibility("default")))
                #define DRWAV_DLL_EXPORT  __attribute__((visibility("default")))
                #define DRWAV_DLL_PRIVATE __attribute__((visibility("hidden")))
            #else
                #define DRWAV_DLL_IMPORT
                #define DRWAV_DLL_EXPORT
                #define DRWAV_DLL_PRIVATE static
            #endif
        #endif

        #if defined(DR_WAV_IMPLEMENTATION) || defined(DRWAV_IMPLEMENTATION)
            #define DRWAV_API  DRWAV_DLL_EXPORT
        #else
            #define DRWAV_API  DRWAV_DLL_IMPORT
        #endif
        #define DRWAV_PRIVATE DRWAV_DLL_PRIVATE
    #else
        #define DRWAV_API extern
        #define DRWAV_PRIVATE static
    #endif
#endif

typedef drwav_int32 drwav_result;
#define DRWAV_SUCCESS                        0
#define DRWAV_ERROR                         -1   /* A generic error. */
#define DRWAV_INVALID_ARGS                  -2
#define DRWAV_INVALID_OPERATION             -3
#define DRWAV_OUT_OF_MEMORY                 -4
#define DRWAV_OUT_OF_RANGE                  -5
#define DRWAV_ACCESS_DENIED                 -6
#define DRWAV_DOES_NOT_EXIST                -7
#define DRWAV_ALREADY_EXISTS                -8
#define DRWAV_TOO_MANY_OPEN_FILES           -9
#define DRWAV_INVALID_FILE                  -10
#define DRWAV_TOO_BIG                       -11
#define DRWAV_PATH_TOO_LONG                 -12
#define DRWAV_NAME_TOO_LONG                 -13
#define DRWAV_NOT_DIRECTORY                 -14
#define DRWAV_IS_DIRECTORY                  -15
#define DRWAV_DIRECTORY_NOT_EMPTY           -16
#define DRWAV_END_OF_FILE                   -17
#define DRWAV_NO_SPACE                      -18
#define DRWAV_BUSY                          -19
#define DRWAV_IO_ERROR                      -20
#define DRWAV_INTERRUPT                     -21
#define DRWAV_UNAVAILABLE                   -22
#define DRWAV_ALREADY_IN_USE                -23
#define DRWAV_BAD_ADDRESS                   -24
#define DRWAV_BAD_SEEK                      -25
#define DRWAV_BAD_PIPE                      -26
#define DRWAV_DEADLOCK                      -27
#define DRWAV_TOO_MANY_LINKS                -28
#define DRWAV_NOT_IMPLEMENTED               -29
#define DRWAV_NO_MESSAGE                    -30
#define DRWAV_BAD_MESSAGE                   -31
#define DRWAV_NO_DATA_AVAILABLE             -32
#define DRWAV_INVALID_DATA                  -33
#define DRWAV_TIMEOUT                       -34
#define DRWAV_NO_NETWORK                    -35
#define DRWAV_NOT_UNIQUE                    -36
#define DRWAV_NOT_SOCKET                    -37
#define DRWAV_NO_ADDRESS                    -38
#define DRWAV_BAD_PROTOCOL                  -39
#define DRWAV_PROTOCOL_UNAVAILABLE          -40
#define DRWAV_PROTOCOL_NOT_SUPPORTED        -41
#define DRWAV_PROTOCOL_FAMILY_NOT_SUPPORTED -42
#define DRWAV_ADDRESS_FAMILY_NOT_SUPPORTED  -43
#define DRWAV_SOCKET_NOT_SUPPORTED          -44
#define DRWAV_CONNECTION_RESET              -45
#define DRWAV_ALREADY_CONNECTED             -46
#define DRWAV_NOT_CONNECTED                 -47
#define DRWAV_CONNECTION_REFUSED            -48
#define DRWAV_NO_HOST                       -49
#define DRWAV_IN_PROGRESS                   -50
#define DRWAV_CANCELLED                     -51
#define DRWAV_MEMORY_ALREADY_MAPPED         -52
#define DRWAV_AT_END                        -53

/* Common data formats. */
#define DR_WAVE_FORMAT_PCM          0x1
#define DR_WAVE_FORMAT_ADPCM        0x2
#define DR_WAVE_FORMAT_IEEE_FLOAT   0x3
#define DR_WAVE_FORMAT_ALAW         0x6
#define DR_WAVE_FORMAT_MULAW        0x7
#define DR_WAVE_FORMAT_DVI_ADPCM    0x11
#define DR_WAVE_FORMAT_EXTENSIBLE   0xFFFE

/* Constants. */
#ifndef DRWAV_MAX_SMPL_LOOPS
#define DRWAV_MAX_SMPL_LOOPS        1
#endif

/* Flags to pass into drwav_init_ex(), etc. */
#define DRWAV_SEQUENTIAL            0x00000001

DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision);
DRWAV_API const char* drwav_version_string(void);

/* Origin used by drwav_seek_proc callbacks: whether the offset is relative to the start of the stream or the current position. */
typedef enum
{
    drwav_seek_origin_start,    /* Seek relative to the beginning of the stream. */
    drwav_seek_origin_current   /* Seek relative to the current position. */
} drwav_seek_origin;

/* The top-level container format of the wave file. */
typedef enum
{
    drwav_container_riff,   /* Standard RIFF (.wav). Chunks are identified by a four-character code. */
    drwav_container_w64,    /* Sony Wave64. Chunks are identified by a GUID (see drwav_chunk_header). */
    drwav_container_rf64    /* RF64 (RIFF extended for large files). Uses four-character codes like RIFF. */
} drwav_container;

/* Basic header information about a chunk, as passed to drwav_chunk_proc callbacks. */
typedef struct
{
    /*
    Chunk identifier. Which member is valid depends on the container: use fourcc for
    drwav_container_riff and drwav_container_rf64, and guid for drwav_container_w64.
    */
    union
    {
        drwav_uint8 fourcc[4];
        drwav_uint8 guid[16];
    } id;

    /* The size in bytes of the chunk. */
    drwav_uint64 sizeInBytes;

    /*
    RIFF = 2 byte alignment.
    W64  = 8 byte alignment.
    */
    unsigned int paddingSize;
} drwav_chunk_header;

/* Contents of the wave file's "fmt" chunk, exactly as read from the file. Retrieve the effective sample format with drwav_fmt_get_format(). */
typedef struct
{
    /*
    The format tag exactly as specified in the wave file's "fmt" chunk. This can be used by applications
    that require support for data formats not natively supported by dr_wav.
    */
    drwav_uint16 formatTag;

    /* The number of channels making up the audio data. When this is set to 1 it is mono, 2 is stereo, etc. */
    drwav_uint16 channels;

    /* The sample rate. Usually set to something like 44100. */
    drwav_uint32 sampleRate;

    /* Average bytes per second. You probably don't need this, but it's left here for informational purposes. */
    drwav_uint32 avgBytesPerSec;

    /* Block align. This is equal to the number of channels * bytes per sample. */
    drwav_uint16 blockAlign;

    /* Bits per sample. */
    drwav_uint16 bitsPerSample;

    /* The size of the extended data. Only used internally for validation, but left here for informational purposes. */
    drwav_uint16 extendedSize;

    /*
    The number of valid bits per sample. When <formatTag> is equal to WAVE_FORMAT_EXTENSIBLE, <bitsPerSample>
    is always rounded up to the nearest multiple of 8. This variable contains information about exactly how
    many bits are valid per sample. Mainly used for informational purposes.
    */
    drwav_uint16 validBitsPerSample;

    /* The channel mask. Not used at the moment. */
    drwav_uint32 channelMask;

    /* The sub-format, exactly as specified by the wave file. Only meaningful when formatTag is WAVE_FORMAT_EXTENSIBLE. */
    drwav_uint8 subFormat[16];
} drwav_fmt;

DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT);


/*
Callback for when data is read. Return value is the number of bytes actually read.

pUserData   [in]  The user data that was passed to drwav_init() and family.
pBufferOut  [out] The output buffer.
bytesToRead [in]  The number of bytes to read.

Returns the number of bytes actually read.

A return value of less than bytesToRead indicates the end of the stream. Do _not_ return from this callback until
either the entire bytesToRead is filled or you have reached the end of the stream.
*/
typedef size_t (* drwav_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead);

/*
Callback for when data is written. Return value is the number of bytes actually written.

pUserData    [in]  The user data that was passed to drwav_init_write() and family.
pData        [out] A pointer to the data to write.
bytesToWrite [in]  The number of bytes to write.

Returns the number of bytes actually written.

If the return value differs from bytesToWrite, it indicates an error.
*/
typedef size_t (* drwav_write_proc)(void* pUserData, const void* pData, size_t bytesToWrite);

/*
Callback for when data needs to be seeked.

pUserData [in] The user data that was passed to drwav_init() and family.
offset    [in] The number of bytes to move, relative to the origin. Will never be negative.
origin    [in] The origin of the seek - the current position or the start of the stream.

Returns whether or not the seek was successful.

Whether or not it is relative to the beginning or current position is determined by the "origin" parameter which will be either drwav_seek_origin_start or
drwav_seek_origin_current.
*/
typedef drwav_bool32 (* drwav_seek_proc)(void* pUserData, int offset, drwav_seek_origin origin);

/*
Callback for when drwav_init_ex() finds a chunk.

pChunkUserData    [in] The user data that was passed to the pChunkUserData parameter of drwav_init_ex() and family.
onRead            [in] A pointer to the function to call when reading.
onSeek            [in] A pointer to the function to call when seeking.
pReadSeekUserData [in] The user data that was passed to the pReadSeekUserData parameter of drwav_init_ex() and family.
pChunkHeader      [in] A pointer to an object containing basic header information about the chunk. Use this to identify the chunk.
container         [in] Whether or not the WAV file is a RIFF or Wave64 container. If you're unsure of the difference, assume RIFF.
pFMT              [in] A pointer to the object containing the contents of the "fmt" chunk.

Returns the number of bytes read + seeked.

To read data from the chunk, call onRead(), passing in pReadSeekUserData as the first parameter. Do the same for seeking with onSeek(). The return value must
be the total number of bytes you have read _plus_ seeked.

Use the `container` argument to discriminate the fields in `pChunkHeader->id`. If the container is `drwav_container_riff` or `drwav_container_rf64` you should
use `id.fourcc`, otherwise you should use `id.guid`.

The `pFMT` parameter can be used to determine the data format of the wave file. Use `drwav_fmt_get_format()` to get the sample format, which will be one of the
`DR_WAVE_FORMAT_*` identifiers. 

The read pointer will be sitting on the first byte after the chunk's header. You must not attempt to read beyond the boundary of the chunk.
*/
typedef drwav_uint64 (* drwav_chunk_proc)(void* pChunkUserData, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_chunk_header* pChunkHeader, drwav_container container, const drwav_fmt* pFMT);

/* Application-defined memory allocation callbacks, accepted by the drwav_init*() family. */
typedef struct
{
    void* pUserData;                                          /* Application data forwarded to each callback below. */
    void* (* onMalloc)(size_t sz, void* pUserData);           /* Allocate sz bytes. */
    void* (* onRealloc)(void* p, size_t sz, void* pUserData); /* Resize the allocation at p to sz bytes. */
    void  (* onFree)(void* p, void* pUserData);               /* Free the allocation at p. */
} drwav_allocation_callbacks;

/* Structure for internal use. Only used for loaders opened with drwav_init_memory(). */
/* Structure for internal use. Only used for loaders opened with drwav_init_memory(). */
typedef struct
{
    const drwav_uint8* data;  /* The caller's buffer containing the whole wave file. Not copied or owned by dr_wav. */
    size_t dataSize;          /* Size of data, in bytes. */
    size_t currentReadPos;    /* Current read offset into data, in bytes. */
} drwav__memory_stream;

/* Structure for internal use. Only used for writers opened with drwav_init_memory_write(). */
/* Structure for internal use. Only used for writers opened with drwav_init_memory_write(). */
typedef struct
{
    void** ppData;            /* Where the output buffer pointer is reported back to the caller (caller frees with drwav_free()). */
    size_t* pDataSize;        /* Where the output size, in bytes, is reported back to the caller. */
    size_t dataSize;          /* Internal bookkeeping: bytes written to the buffer so far. */
    size_t dataCapacity;      /* Internal bookkeeping: allocated capacity of the buffer, in bytes. */
    size_t currentWritePos;   /* Internal bookkeeping: current write offset, in bytes. */
} drwav__memory_stream_write;

/* Output format description passed to drwav_init_write() and family. */
typedef struct
{
    drwav_container container;  /* RIFF, W64. */
    drwav_uint32 format;        /* DR_WAVE_FORMAT_* */
    drwav_uint32 channels;      /* Number of channels: 1 = mono, 2 = stereo, etc. */
    drwav_uint32 sampleRate;    /* Sample rate, e.g. 44100. */
    drwav_uint32 bitsPerSample; /* Bits per sample, e.g. 16. */
} drwav_data_format;


/* See the following for details on the 'smpl' chunk: https://sites.google.com/site/musicgapi/technical-documents/wav-file-format#smpl */
/* A single sampler loop descriptor from the "smpl" chunk. Field semantics follow the smpl chunk spec linked above. */
typedef struct
{
    drwav_uint32 cuePointId;  /* Identifier of the cue point this loop is associated with. */
    drwav_uint32 type;        /* Loop type per the smpl spec (e.g. 0 = forward). */
    drwav_uint32 start;       /* Loop start point. */
    drwav_uint32 end;         /* Loop end point. */
    drwav_uint32 fraction;    /* Fractional loop point adjustment, per the smpl spec. */
    drwav_uint32 playCount;   /* Number of times to play the loop (0 = infinite, per the smpl spec). */
} drwav_smpl_loop;

 typedef struct
{
    drwav_uint32 manufacturer;
    drwav_uint32 product;
    drwav_uint32 samplePeriod;
    drwav_uint32 midiUnityNotes;
    drwav_uint32 midiPitchFraction;
    drwav_uint32 smpteFormat;
    drwav_uint32 smpteOffset;
    drwav_uint32 numSampleLoops;
    drwav_uint32 samplerData;
    drwav_smpl_loop loops[DRWAV_MAX_SMPL_LOOPS];
} drwav_smpl;

/*
Main state for a dr_wav reader or writer. Initialize with one of the drwav_init*() /
drwav_init_write*() functions and release with drwav_uninit().
*/
typedef struct
{
    /* A pointer to the function to call when more data is needed. */
    drwav_read_proc onRead;

    /* A pointer to the function to call when data needs to be written. Only used when the drwav object is opened in write mode. */
    drwav_write_proc onWrite;

    /* A pointer to the function to call when the wav file needs to be seeked. */
    drwav_seek_proc onSeek;

    /* The user data to pass to callbacks. */
    void* pUserData;

    /* Allocation callbacks. */
    drwav_allocation_callbacks allocationCallbacks;


    /* Whether or not the WAV file is formatted as a standard RIFF file or W64. */
    drwav_container container;


    /* Structure containing format information exactly as specified by the wav file. */
    drwav_fmt fmt;

    /* The sample rate. Will be set to something like 44100. */
    drwav_uint32 sampleRate;

    /* The number of channels. This will be set to 1 for monaural streams, 2 for stereo, etc. */
    drwav_uint16 channels;

    /* The bits per sample. Will be set to something like 16, 24, etc. */
    drwav_uint16 bitsPerSample;

    /* Equal to fmt.formatTag, or the value specified by fmt.subFormat if fmt.formatTag is equal to 65534 (WAVE_FORMAT_EXTENSIBLE). */
    drwav_uint16 translatedFormatTag;

    /* The total number of PCM frames making up the audio data. */
    drwav_uint64 totalPCMFrameCount;


    /* The size in bytes of the data chunk. */
    drwav_uint64 dataChunkDataSize;

    /* The position in the stream of the first byte of the data chunk. This is used for seeking. */
    drwav_uint64 dataChunkDataPos;

    /* The number of bytes remaining in the data chunk. */
    drwav_uint64 bytesRemaining;


    /*
    Only used in sequential write mode. Keeps track of the desired size of the "data" chunk at the point of initialization time. Always
    set to 0 for non-sequential writes and when the drwav object is opened in read mode. Used for validation.
    */
    drwav_uint64 dataChunkDataSizeTargetWrite;

    /* Keeps track of whether or not the wav writer was initialized in sequential mode. */
    drwav_bool32 isSequentialWrite;


    /* smpl chunk. */
    drwav_smpl smpl;


    /* A hack to avoid a DRWAV_MALLOC() when opening a decoder with drwav_init_memory(). */
    drwav__memory_stream memoryStream;
    drwav__memory_stream_write memoryStreamWrite;

    /* Generic data for compressed formats. This data is shared across all block-compressed formats. */
    struct
    {
        drwav_uint64 iCurrentPCMFrame;  /* The index of the next PCM frame that will be read by drwav_read_*(). This is used with "totalPCMFrameCount" to ensure we don't read excess samples at the end of the last block. */
    } compressed;

    /* Microsoft ADPCM specific data. */
    struct
    {
        drwav_uint32 bytesRemainingInBlock;
        drwav_uint16 predictor[2];
        drwav_int32  delta[2];
        drwav_int32  cachedFrames[4];  /* Samples are stored in this cache during decoding. */
        drwav_uint32 cachedFrameCount;
        drwav_int32  prevFrames[2][2]; /* The previous 2 samples for each channel (2 channels at most). */
    } msadpcm;

    /* IMA ADPCM specific data. */
    struct
    {
        drwav_uint32 bytesRemainingInBlock;
        drwav_int32  predictor[2];
        drwav_int32  stepIndex[2];
        drwav_int32  cachedFrames[16]; /* Samples are stored in this cache during decoding. */
        drwav_uint32 cachedFrameCount;
    } ima;
} drwav;


/*
Initializes a pre-allocated drwav object for reading.

pWav                         [out]          A pointer to the drwav object being initialized.
onRead                       [in]           The function to call when data needs to be read from the client.
onSeek                       [in]           The function to call when the read position of the client data needs to move.
onChunk                      [in, optional] The function to call when a chunk is enumerated at initialized time.
pUserData, pReadSeekUserData [in, optional] A pointer to application defined data that will be passed to onRead and onSeek.
pChunkUserData               [in, optional] A pointer to application defined data that will be passed to onChunk.
flags                        [in, optional] A set of flags for controlling how things are loaded.

Returns true if successful; false otherwise.

Close the loader with drwav_uninit().

This is the lowest level function for initializing a WAV file. You can also use drwav_init_file() and drwav_init_memory()
to open the stream from a file or from a block of memory respectively.

Possible values for flags:
  DRWAV_SEQUENTIAL: Never perform a backwards seek while loading. This disables the chunk callback and will cause this function
                    to return as soon as the data chunk is found. Any chunks after the data chunk will be ignored.

drwav_init() is equivalent to "drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0);".

The onChunk callback is not called for the WAVE or FMT chunks. The contents of the FMT chunk can be read from pWav->fmt
after the function returns.

See also: drwav_init_file(), drwav_init_memory(), drwav_uninit()
*/
DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);

/*
Initializes a pre-allocated drwav object for writing.

onWrite   [in]           The function to call when data needs to be written.
onSeek    [in]           The function to call when the write position needs to move.
pUserData [in, optional] A pointer to application defined data that will be passed to onWrite and onSeek.

Returns true if successful; false otherwise.

Close the writer with drwav_uninit().

This is the lowest level function for initializing a WAV file. You can also use drwav_init_file_write() and drwav_init_memory_write()
to open the stream from a file or from a block of memory respectively.

If the total sample count is known, you can use drwav_init_write_sequential(). This avoids the need for dr_wav to perform
a post-processing step for storing the total sample count and the size of the data chunk which requires a backwards seek.

See also: drwav_init_file_write(), drwav_init_memory_write(), drwav_uninit()
*/
DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);

/*
Utility function to determine the target size of the entire data to be written (including all headers and chunks).

Returns the target size in bytes.

Useful if the application needs to know the size to allocate.

Only writing to the RIFF chunk and one data chunk is currently supported.

See also: drwav_init_write(), drwav_init_file_write(), drwav_init_memory_write()
*/
DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);

/*
Uninitializes the given drwav object.

Use this only for objects initialized with drwav_init*() functions (drwav_init(), drwav_init_ex(), drwav_init_write(), drwav_init_write_sequential()).
*/
DRWAV_API drwav_result drwav_uninit(drwav* pWav);


/*
Reads raw audio data.

This is the lowest level function for reading audio data. It simply reads the given number of
bytes of the raw internal sample data.

Consider using drwav_read_pcm_frames_s16(), drwav_read_pcm_frames_s32() or drwav_read_pcm_frames_f32() for
reading sample data in a consistent format.

pBufferOut can be NULL in which case a seek will be performed.

Returns the number of bytes actually read.
*/
DRWAV_API size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut);

/*
Reads up to the specified number of PCM frames from the WAV file.

The output data will be in the file's internal format, converted to native-endian byte order. Use
drwav_read_pcm_frames_s16/f32/s32() to read data in a specific format.

If the return value is less than <framesToRead> it means the end of the file has been reached or
you have requested more PCM frames than can possibly fit in the output buffer.

This function will only work when sample data is of a fixed size and uncompressed. If you are
using a compressed format consider using drwav_read_raw() or drwav_read_pcm_frames_s16/s32/f32().

pBufferOut can be NULL in which case a seek will be performed.
*/
DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);

/*
Seeks to the given PCM frame.

Returns true if successful; false otherwise.
*/
DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex);


/*
Writes raw audio data.

Returns the number of bytes actually written. If this differs from bytesToWrite, it indicates an error.
*/
DRWAV_API size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData);

/*
Writes PCM frames.

Returns the number of PCM frames written.

Input samples need to be in native-endian byte order. On big-endian architectures the input data will be converted to
little-endian. Use drwav_write_raw() to write raw audio data without performing any conversion.
*/
DRWAV_API drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
DRWAV_API drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);


/* Conversion Utilities */
#ifndef DR_WAV_NO_CONVERSION_API

/*
Reads a chunk of audio data and converts it to signed 16-bit PCM samples.

pBufferOut can be NULL in which case a seek will be performed.

Returns the number of PCM frames actually read.

If the return value is less than <framesToRead> it means the end of the file has been reached.
*/
DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);

/* Low-level function for converting unsigned 8-bit PCM samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting signed 24-bit PCM samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_s24_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting signed 32-bit PCM samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_s32_to_s16(drwav_int16* pOut, const drwav_int32* pIn, size_t sampleCount);

/* Low-level function for converting IEEE 32-bit floating point samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_f32_to_s16(drwav_int16* pOut, const float* pIn, size_t sampleCount);

/* Low-level function for converting IEEE 64-bit floating point samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_f64_to_s16(drwav_int16* pOut, const double* pIn, size_t sampleCount);

/* Low-level function for converting A-law samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_alaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting u-law samples to signed 16-bit PCM samples. */
DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);


/*
Reads a chunk of audio data and converts it to IEEE 32-bit floating point samples.

pBufferOut can be NULL in which case a seek will be performed.

Returns the number of PCM frames actually read.

If the return value is less than <framesToRead> it means the end of the file has been reached.
*/
DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut);

/* Low-level function for converting unsigned 8-bit PCM samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting signed 16-bit PCM samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, size_t sampleCount);

/* Low-level function for converting signed 24-bit PCM samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting signed 32-bit PCM samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_s32_to_f32(float* pOut, const drwav_int32* pIn, size_t sampleCount);

/* Low-level function for converting IEEE 64-bit floating point samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount);

/* Low-level function for converting A-law samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_alaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting u-law samples to IEEE 32-bit floating point samples. */
DRWAV_API void drwav_mulaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount);


/*
Reads a chunk of audio data and converts it to signed 32-bit PCM samples.

pBufferOut can be NULL in which case a seek will be performed.

Returns the number of PCM frames actually read.

If the return value is less than <framesToRead> it means the end of the file has been reached.
*/
DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut);
DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut);

/* Low-level function for converting unsigned 8-bit PCM samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting signed 16-bit PCM samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_s16_to_s32(drwav_int32* pOut, const drwav_int16* pIn, size_t sampleCount);

/* Low-level function for converting signed 24-bit PCM samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_s24_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting IEEE 32-bit floating point samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_f32_to_s32(drwav_int32* pOut, const float* pIn, size_t sampleCount);

/* Low-level function for converting IEEE 64-bit floating point samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_f64_to_s32(drwav_int32* pOut, const double* pIn, size_t sampleCount);

/* Low-level function for converting A-law samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_alaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount);

/* Low-level function for converting u-law samples to signed 32-bit PCM samples. */
DRWAV_API void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount);

#endif  /* DR_WAV_NO_CONVERSION_API */


/* High-Level Convenience Helpers */

#ifndef DR_WAV_NO_STDIO
/*
Helper for initializing a wave file for reading using stdio.

This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav
objects because the operating system may restrict the number of file handles an application can have open at
any given time.
*/
DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);

/*
Helper for initializing a wave file for writing using stdio.

This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav
objects because the operating system may restrict the number of file handles an application can have open at
any given time.
*/
DRWAV_API drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks);
#endif  /* DR_WAV_NO_STDIO */

/*
Helper for initializing a loader from a pre-allocated memory buffer.

This does not create a copy of the data. It is up to the application to ensure the buffer remains valid for
the lifetime of the drwav object.

The buffer should contain the contents of the entire wave file, not just the sample data.
*/
DRWAV_API drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);

/*
Helper for initializing a writer which outputs data to a memory buffer.

dr_wav will manage the memory allocations, however it is up to the caller to free the data with drwav_free().

The buffer will remain allocated even after drwav_uninit() is called. The buffer should not be considered valid
until after drwav_uninit() has been called.
*/
DRWAV_API drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks);


#ifndef DR_WAV_NO_CONVERSION_API
/*
Opens and reads an entire wav file in a single operation.

The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer.
*/
DRWAV_API drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
#ifndef DR_WAV_NO_STDIO
/*
Opens and decodes an entire wav file in a single operation.

The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer.
*/
DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
#endif
/*
Opens and decodes an entire wav file from a block of memory in a single operation.

The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer.
*/
DRWAV_API drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
DRWAV_API drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
#endif

/* Frees data that was allocated internally by dr_wav. */
DRWAV_API void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks);

/* Converts bytes from a wav stream to a sized type of native endian. */
DRWAV_API drwav_uint16 drwav_bytes_to_u16(const drwav_uint8* data);
DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data);
DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data);
DRWAV_API drwav_int32 drwav_bytes_to_s32(const drwav_uint8* data);
DRWAV_API drwav_uint64 drwav_bytes_to_u64(const drwav_uint8* data);
DRWAV_API drwav_int64 drwav_bytes_to_s64(const drwav_uint8* data);

/* Compares a GUID for the purpose of checking the type of a Wave64 chunk. */
DRWAV_API drwav_bool32 drwav_guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16]);

/* Compares a four-character-code for the purpose of checking the type of a RIFF chunk. */
DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b);

#ifdef __cplusplus
}
#endif
#endif  /* dr_wav_h */


/************************************************************************************************************************************************************
 ************************************************************************************************************************************************************

 IMPLEMENTATION

 ************************************************************************************************************************************************************
 ************************************************************************************************************************************************************/
#if defined(DR_WAV_IMPLEMENTATION) || defined(DRWAV_IMPLEMENTATION)
#ifndef dr_wav_c
#define dr_wav_c

#include <stdlib.h>
#include <string.h> /* For memcpy(), memset() */
#include <limits.h> /* For INT_MAX */

#ifndef DR_WAV_NO_STDIO
#include <stdio.h>
#include <wchar.h>
#endif

/* Standard library stuff. */
#ifndef DRWAV_ASSERT
#include <assert.h>
#define DRWAV_ASSERT(expression)           assert(expression)
#endif
#ifndef DRWAV_MALLOC
#define DRWAV_MALLOC(sz)                   malloc((sz))
#endif
#ifndef DRWAV_REALLOC
#define DRWAV_REALLOC(p, sz)               realloc((p), (sz))
#endif
#ifndef DRWAV_FREE
#define DRWAV_FREE(p)                      free((p))
#endif
#ifndef DRWAV_COPY_MEMORY
#define DRWAV_COPY_MEMORY(dst, src, sz)    memcpy((dst), (src), (sz))
#endif
#ifndef DRWAV_ZERO_MEMORY
#define DRWAV_ZERO_MEMORY(p, sz)           memset((p), 0, (sz))
#endif
#ifndef DRWAV_ZERO_OBJECT
#define DRWAV_ZERO_OBJECT(p)               DRWAV_ZERO_MEMORY((p), sizeof(*p))
#endif

#define drwav_countof(x)                   (sizeof(x) / sizeof(x[0]))
#define drwav_align(x, a)                  ((((x) + (a) - 1) / (a)) * (a))
#define drwav_min(a, b)                    (((a) < (b)) ? (a) : (b))
#define drwav_max(a, b)                    (((a) > (b)) ? (a) : (b))
#define drwav_clamp(x, lo, hi)             (drwav_max((lo), drwav_min((hi), (x))))

#define DRWAV_MAX_SIMD_VECTOR_SIZE         64  /* 64 for AVX-512 in the future. */

/* CPU architecture. */
#if defined(__x86_64__) || defined(_M_X64)
    #define DRWAV_X64
#elif defined(__i386) || defined(_M_IX86)
    #define DRWAV_X86
#elif defined(__arm__) || defined(_M_ARM)
    #define DRWAV_ARM
#endif

#ifdef _MSC_VER
    #define DRWAV_INLINE __forceinline
#elif defined(__GNUC__)
    /*
    I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
    the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
    case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
    command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
    I am using "__inline__" only when we're compiling in strict ANSI mode.
    */
    #if defined(__STRICT_ANSI__)
        #define DRWAV_INLINE __inline__ __attribute__((always_inline))
    #else
        #define DRWAV_INLINE inline __attribute__((always_inline))
    #endif
#elif defined(__WATCOMC__)
    #define DRWAV_INLINE __inline
#else
    #define DRWAV_INLINE
#endif

#if defined(SIZE_MAX)
    #define DRWAV_SIZE_MAX  SIZE_MAX
#else
    #if defined(_WIN64) || defined(_LP64) || defined(__LP64__)
        #define DRWAV_SIZE_MAX  ((drwav_uint64)0xFFFFFFFFFFFFFFFF)
    #else
        #define DRWAV_SIZE_MAX  0xFFFFFFFF
    #endif
#endif

#if defined(_MSC_VER) && _MSC_VER >= 1400
    #define DRWAV_HAS_BYTESWAP16_INTRINSIC
    #define DRWAV_HAS_BYTESWAP32_INTRINSIC
    #define DRWAV_HAS_BYTESWAP64_INTRINSIC
#elif defined(__clang__)
    #if defined(__has_builtin)
        #if __has_builtin(__builtin_bswap16)
            #define DRWAV_HAS_BYTESWAP16_INTRINSIC
        #endif
        #if __has_builtin(__builtin_bswap32)
            #define DRWAV_HAS_BYTESWAP32_INTRINSIC
        #endif
        #if __has_builtin(__builtin_bswap64)
            #define DRWAV_HAS_BYTESWAP64_INTRINSIC
        #endif
    #endif
#elif defined(__GNUC__)
    #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
        #define DRWAV_HAS_BYTESWAP32_INTRINSIC
        #define DRWAV_HAS_BYTESWAP64_INTRINSIC
    #endif
    #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
        #define DRWAV_HAS_BYTESWAP16_INTRINSIC
    #endif
#endif

/* Retrieves the dr_wav version as individual components. Any output pointer may be NULL. */
DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision)
{
    if (pMajor != NULL) {
        *pMajor = DRWAV_VERSION_MAJOR;
    }
    if (pMinor != NULL) {
        *pMinor = DRWAV_VERSION_MINOR;
    }
    if (pRevision != NULL) {
        *pRevision = DRWAV_VERSION_REVISION;
    }
}

/* Returns the dr_wav version as a static string (never needs freeing). */
DRWAV_API const char* drwav_version_string(void)
{
    return DRWAV_VERSION_STRING;
}

/*
These limits are used for basic validation when initializing the decoder. If you exceed these limits, first of all: what on Earth are
you doing?! (Let me know, I'd be curious!) Second, you can adjust these by #define-ing them before the dr_wav implementation.
*/
#ifndef DRWAV_MAX_SAMPLE_RATE
#define DRWAV_MAX_SAMPLE_RATE       384000
#endif
#ifndef DRWAV_MAX_CHANNELS
#define DRWAV_MAX_CHANNELS          256
#endif
#ifndef DRWAV_MAX_BITS_PER_SAMPLE
#define DRWAV_MAX_BITS_PER_SAMPLE   64
#endif

static const drwav_uint8 drwavGUID_W64_RIFF[16] = {0x72,0x69,0x66,0x66, 0x2E,0x91, 0xCF,0x11, 0xA5,0xD6, 0x28,0xDB,0x04,0xC1,0x00,0x00};    /* 66666972-912E-11CF-A5D6-28DB04C10000 */
static const drwav_uint8 drwavGUID_W64_WAVE[16] = {0x77,0x61,0x76,0x65, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 65766177-ACF3-11D3-8CD1-00C04F8EDB8A */
/*static const drwav_uint8 drwavGUID_W64_JUNK[16] = {0x6A,0x75,0x6E,0x6B, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};*/    /* 6B6E756A-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_FMT [16] = {0x66,0x6D,0x74,0x20, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 20746D66-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_FACT[16] = {0x66,0x61,0x63,0x74, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 74636166-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_DATA[16] = {0x64,0x61,0x74,0x61, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 61746164-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_SMPL[16] = {0x73,0x6D,0x70,0x6C, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 6C706D73-ACF3-11D3-8CD1-00C04F8EDB8A */

/* Returns DRWAV_TRUE when the two 16-byte GUIDs are byte-for-byte identical. */
static DRWAV_INLINE drwav_bool32 drwav__guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16])
{
    const drwav_uint8* pA   = a;
    const drwav_uint8* pB   = b;
    const drwav_uint8* pEnd = a + 16;

    while (pA < pEnd) {
        if (*pA++ != *pB++) {
            return DRWAV_FALSE;
        }
    }

    return DRWAV_TRUE;
}

/* Returns DRWAV_TRUE when the 4-byte chunk id matches the four-character code in b. */
static DRWAV_INLINE drwav_bool32 drwav__fourcc_equal(const drwav_uint8* a, const char* b)
{
    int i;
    for (i = 0; i < 4; i += 1) {
        if (a[i] != b[i]) {
            return DRWAV_FALSE;
        }
    }
    return DRWAV_TRUE;
}



/*
Returns non-zero when the host CPU is little endian. On x86/x64 and platforms that
advertise __BYTE_ORDER == __LITTLE_ENDIAN this resolves at compile time; otherwise it
falls back to a runtime probe of the first byte of an int.
*/
static DRWAV_INLINE int drwav__is_little_endian(void)
{
#if defined(DRWAV_X86) || defined(DRWAV_X64)
    return DRWAV_TRUE;
#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN
    return DRWAV_TRUE;
#else
    int n = 1;
    return (*(char*)&n) == 1;
#endif
}

/* Reads a little-endian unsigned 16-bit integer from a byte stream. */
static DRWAV_INLINE drwav_uint16 drwav__bytes_to_u16(const drwav_uint8* data)
{
    return (data[0] << 0) | (data[1] << 8);
}

/* Signed variant of drwav__bytes_to_u16(); reinterprets the 16-bit pattern as signed. */
static DRWAV_INLINE drwav_int16 drwav__bytes_to_s16(const drwav_uint8* data)
{
    return (short)drwav__bytes_to_u16(data);
}

/*
Reads a little-endian unsigned 32-bit integer from a byte stream.

Each byte is widened to drwav_uint32 before shifting. Without the casts the bytes
promote to (signed) int, and "data[3] << 24" is undefined behavior in C whenever the
top bit of data[3] is set, because the result overflows a signed int.
*/
static DRWAV_INLINE drwav_uint32 drwav__bytes_to_u32(const drwav_uint8* data)
{
    return
        ((drwav_uint32)data[0] <<  0) |
        ((drwav_uint32)data[1] <<  8) |
        ((drwav_uint32)data[2] << 16) |
        ((drwav_uint32)data[3] << 24);
}

/* Signed variant of drwav__bytes_to_u32(); reinterprets the 32-bit pattern as signed. */
static DRWAV_INLINE drwav_int32 drwav__bytes_to_s32(const drwav_uint8* data)
{
    return (drwav_int32)drwav__bytes_to_u32(data);
}

/* Reads a little-endian unsigned 64-bit integer from a byte stream. Each byte is widened before shifting. */
static DRWAV_INLINE drwav_uint64 drwav__bytes_to_u64(const drwav_uint8* data)
{
    return
        ((drwav_uint64)data[0] <<  0) | ((drwav_uint64)data[1] <<  8) | ((drwav_uint64)data[2] << 16) | ((drwav_uint64)data[3] << 24) |
        ((drwav_uint64)data[4] << 32) | ((drwav_uint64)data[5] << 40) | ((drwav_uint64)data[6] << 48) | ((drwav_uint64)data[7] << 56);
}

/* Signed variant of drwav__bytes_to_u64(). */
static DRWAV_INLINE drwav_int64 drwav__bytes_to_s64(const drwav_uint8* data)
{
    return (drwav_int64)drwav__bytes_to_u64(data);
}

/* Copies a raw 16-byte GUID out of a byte stream into guid. */
static DRWAV_INLINE void drwav__bytes_to_guid(const drwav_uint8* data, drwav_uint8* guid)
{
    DRWAV_COPY_MEMORY(guid, data, 16);
}


/* Byte-swaps a 16-bit unsigned integer, preferring a compiler intrinsic when one was detected above. */
static DRWAV_INLINE drwav_uint16 drwav__bswap16(drwav_uint16 n)
{
#ifdef DRWAV_HAS_BYTESWAP16_INTRINSIC
    #if defined(_MSC_VER)
        return _byteswap_ushort(n);
    #elif defined(__GNUC__) || defined(__clang__)
        return __builtin_bswap16(n);
    #else
        #error "This compiler does not support the byte swap intrinsic."
    #endif
#else
    /* Portable fallback: mask and shift the two bytes. */
    return ((n & 0xFF00) >> 8) |
           ((n & 0x00FF) << 8);
#endif
}

/* Byte-swaps a 32-bit unsigned integer, preferring a compiler intrinsic (or ARM inline asm) when available. */
static DRWAV_INLINE drwav_uint32 drwav__bswap32(drwav_uint32 n)
{
#ifdef DRWAV_HAS_BYTESWAP32_INTRINSIC
    #if defined(_MSC_VER)
        return _byteswap_ulong(n);
    #elif defined(__GNUC__) || defined(__clang__)
        #if defined(DRWAV_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(DRWAV_64BIT)   /* <-- 64-bit inline assembly has not been tested, so disabling for now. */
            /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */
            drwav_uint32 r;
            __asm__ __volatile__ (
            #if defined(DRWAV_64BIT)
                "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n)   /* <-- This is untested. If someone in the community could test this, that would be appreciated! */
            #else
                "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n)
            #endif
            );
            return r;
        #else
            return __builtin_bswap32(n);
        #endif
    #else
        #error "This compiler does not support the byte swap intrinsic."
    #endif
#else
    /* Portable fallback: mask out each byte and move it to its mirrored position. */
    return ((n & 0xFF000000) >> 24) |
           ((n & 0x00FF0000) >>  8) |
           ((n & 0x0000FF00) <<  8) |
           ((n & 0x000000FF) << 24);
#endif
}

/* Byte-swaps a 64-bit unsigned integer, preferring a compiler intrinsic when available. */
static DRWAV_INLINE drwav_uint64 drwav__bswap64(drwav_uint64 n)
{
#ifdef DRWAV_HAS_BYTESWAP64_INTRINSIC
    #if defined(_MSC_VER)
        return _byteswap_uint64(n);
    #elif defined(__GNUC__) || defined(__clang__)
        return __builtin_bswap64(n);
    #else
        #error "This compiler does not support the byte swap intrinsic."
    #endif
#else
    /* Weird "<< 32" bitshift is required for C89 because it doesn't support 64-bit constants. Should be optimized out by a good compiler. */
    return ((n & ((drwav_uint64)0xFF000000 << 32)) >> 56) |
           ((n & ((drwav_uint64)0x00FF0000 << 32)) >> 40) |
           ((n & ((drwav_uint64)0x0000FF00 << 32)) >> 24) |
           ((n & ((drwav_uint64)0x000000FF << 32)) >>  8) |
           ((n & ((drwav_uint64)0xFF000000      )) <<  8) |
           ((n & ((drwav_uint64)0x00FF0000      )) << 24) |
           ((n & ((drwav_uint64)0x0000FF00      )) << 40) |
           ((n & ((drwav_uint64)0x000000FF      )) << 56);
#endif
}


/* Byte-swaps a signed 16-bit sample by round-tripping through the unsigned swap. */
static DRWAV_INLINE drwav_int16 drwav__bswap_s16(drwav_int16 n)
{
    return (drwav_int16)drwav__bswap16((drwav_uint16)n);
}

/* Byte-swaps a buffer of signed 16-bit samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_s16(drwav_int16* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 i = 0;
    while (i < sampleCount) {
        pSamples[i] = drwav__bswap_s16(pSamples[i]);
        i += 1;
    }
}


/* Byte-swaps one packed 24-bit sample in place by exchanging its outer bytes. */
static DRWAV_INLINE void drwav__bswap_s24(drwav_uint8* p)
{
    drwav_uint8 tmp = p[2];
    p[2] = p[0];
    p[0] = tmp;
}

/* Byte-swaps a buffer of packed 24-bit samples (3 bytes each) in place. */
static DRWAV_INLINE void drwav__bswap_samples_s24(drwav_uint8* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 iSample;
    drwav_uint8* pSample = pSamples;
    for (iSample = 0; iSample < sampleCount; iSample += 1) {
        drwav__bswap_s24(pSample);
        pSample += 3;   /* Advance to the next packed sample. */
    }
}


/* Byte-swaps a signed 32-bit sample by round-tripping through the unsigned swap. */
static DRWAV_INLINE drwav_int32 drwav__bswap_s32(drwav_int32 n)
{
    return (drwav_int32)drwav__bswap32((drwav_uint32)n);
}

/* Byte-swaps a buffer of signed 32-bit samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_s32(drwav_int32* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 i = 0;
    while (i < sampleCount) {
        pSamples[i] = drwav__bswap_s32(pSamples[i]);
        i += 1;
    }
}


/* Byte-swaps a 32-bit float. Uses a union to reinterpret the bits as an integer, swap them, and reinterpret back. */
static DRWAV_INLINE float drwav__bswap_f32(float n)
{
    union {
        drwav_uint32 i;
        float f;
    } x;
    x.f = n;
    x.i = drwav__bswap32(x.i);

    return x.f;
}

/* Byte-swaps a buffer of 32-bit float samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_f32(float* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 i = 0;
    while (i < sampleCount) {
        pSamples[i] = drwav__bswap_f32(pSamples[i]);
        i += 1;
    }
}


/* Byte-swaps a 64-bit double. Uses a union to reinterpret the bits as an integer, swap them, and reinterpret back. */
static DRWAV_INLINE double drwav__bswap_f64(double n)
{
    union {
        drwav_uint64 i;
        double f;
    } x;
    x.f = n;
    x.i = drwav__bswap64(x.i);

    return x.f;
}

/* Byte-swaps a buffer of 64-bit double samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_f64(double* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 i = 0;
    while (i < sampleCount) {
        pSamples[i] = drwav__bswap_f64(pSamples[i]);
        i += 1;
    }
}


/*
Byte-swaps a buffer of integer PCM samples in place, dispatching on the sample width.
Floating point PCM is handled by drwav__bswap_samples_ieee() instead.
*/
static DRWAV_INLINE void drwav__bswap_samples_pcm(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample)
{
    if (bytesPerSample == 2) {          /* s16, and loosely packed s12 */
        drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount);
    } else if (bytesPerSample == 3) {   /* s24 */
        drwav__bswap_samples_s24((drwav_uint8*)pSamples, sampleCount);
    } else if (bytesPerSample == 4) {   /* s32 */
        drwav__bswap_samples_s32((drwav_int32*)pSamples, sampleCount);
    } else {
        DRWAV_ASSERT(DRWAV_FALSE);      /* Unsupported sample width. */
    }
}

/*
Byte-swaps a buffer of IEEE floating point samples in place, dispatching on the sample width.
f16 (2 bytes per sample) is not currently supported; contributions welcome.
*/
static DRWAV_INLINE void drwav__bswap_samples_ieee(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample)
{
    if (bytesPerSample == 4) {          /* f32 */
        drwav__bswap_samples_f32((float*)pSamples, sampleCount);
    } else if (bytesPerSample == 8) {   /* f64 */
        drwav__bswap_samples_f64((double*)pSamples, sampleCount);
    } else {
        DRWAV_ASSERT(DRWAV_FALSE);      /* Unsupported format. */
    }
}

/* Byte-swaps a buffer of samples in place, dispatching on the wave format tag. */
static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample, drwav_uint16 format)
{
    if (format == DR_WAVE_FORMAT_PCM) {
        drwav__bswap_samples_pcm(pSamples, sampleCount, bytesPerSample);
    } else if (format == DR_WAVE_FORMAT_IEEE_FLOAT) {
        drwav__bswap_samples_ieee(pSamples, sampleCount, bytesPerSample);
    } else if (format == DR_WAVE_FORMAT_ALAW || format == DR_WAVE_FORMAT_MULAW) {
        /* A-law / mu-law decode to 16-bit samples. */
        drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount);
    } else {
        /* ADPCM, DVI ADPCM and anything else is unsupported for byte swapping. */
        DRWAV_ASSERT(DRWAV_FALSE);
    }
}


/* Default onMalloc callback: forwards to DRWAV_MALLOC (malloc by default). pUserData is unused. */
static void* drwav__malloc_default(size_t sz, void* pUserData)
{
    (void)pUserData;
    return DRWAV_MALLOC(sz);
}

/* Default onRealloc callback: forwards to DRWAV_REALLOC (realloc by default). pUserData is unused. */
static void* drwav__realloc_default(void* p, size_t sz, void* pUserData)
{
    (void)pUserData;
    return DRWAV_REALLOC(p, sz);
}

/* Default onFree callback: forwards to DRWAV_FREE (free by default). pUserData is unused. */
static void drwav__free_default(void* p, void* pUserData)
{
    (void)pUserData;
    DRWAV_FREE(p);
}


/*
Allocates sz bytes via the caller-supplied allocation callbacks.

Prefers onMalloc; falls back to onRealloc(NULL, sz) when only a realloc callback was
provided. Returns NULL when no usable callback exists or allocation fails.
*/
static void* drwav__malloc_from_callbacks(size_t sz, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pAllocationCallbacks != NULL) {
        if (pAllocationCallbacks->onMalloc != NULL) {
            return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData);
        }
        if (pAllocationCallbacks->onRealloc != NULL) {
            /* No onMalloc; emulate it with realloc(NULL, sz). */
            return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData);
        }
    }

    return NULL;
}

/*
Reallocates p to szNew bytes via the caller-supplied allocation callbacks.

Prefers onRealloc; otherwise emulates realloc with onMalloc + copy (szOld bytes) + onFree,
which requires both of those callbacks. Returns NULL when no usable callbacks exist or
allocation fails (p is left untouched in that case).
*/
static void* drwav__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    void* pNew;

    if (pAllocationCallbacks == NULL) {
        return NULL;
    }

    if (pAllocationCallbacks->onRealloc != NULL) {
        return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData);
    }

    /* No onRealloc; emulation requires both onMalloc and onFree. */
    if (pAllocationCallbacks->onMalloc == NULL || pAllocationCallbacks->onFree == NULL) {
        return NULL;
    }

    pNew = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData);
    if (pNew == NULL) {
        return NULL;
    }

    if (p != NULL) {
        DRWAV_COPY_MEMORY(pNew, p, szOld);
        pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
    }

    return pNew;
}

/* Frees p via the caller-supplied allocation callbacks. Safe to call with NULL p or callbacks. */
static void drwav__free_from_callbacks(void* p, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (p != NULL && pAllocationCallbacks != NULL && pAllocationCallbacks->onFree != NULL) {
        pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
    }
}


/*
Returns a copy of the caller's allocation callbacks, or the malloc/realloc/free
defaults when the caller passed NULL.
*/
static drwav_allocation_callbacks drwav_copy_allocation_callbacks_or_defaults(const drwav_allocation_callbacks* pAllocationCallbacks)
{
    drwav_allocation_callbacks callbacks;

    if (pAllocationCallbacks != NULL) {
        callbacks = *pAllocationCallbacks;  /* Caller-supplied callbacks win. */
    } else {
        callbacks.pUserData = NULL;
        callbacks.onMalloc  = drwav__malloc_default;
        callbacks.onRealloc = drwav__realloc_default;
        callbacks.onFree    = drwav__free_default;
    }

    return callbacks;
}


/* Returns DRWAV_TRUE for format tags that dr_wav treats as compressed (the two ADPCM variants). */
static DRWAV_INLINE drwav_bool32 drwav__is_compressed_format_tag(drwav_uint16 formatTag)
{
    switch (formatTag) {
        case DR_WAVE_FORMAT_ADPCM:
        case DR_WAVE_FORMAT_DVI_ADPCM:
            return DRWAV_TRUE;
        default:
            return DRWAV_FALSE;
    }
}

/* RIFF chunks are 2-byte aligned: returns 1 when a pad byte follows the chunk, 0 otherwise. */
static unsigned int drwav__chunk_padding_size_riff(drwav_uint64 chunkSize)
{
    return (unsigned int)(chunkSize & 1);
}

/* Wave64 chunks are 8-byte aligned: returns the number of pad bytes (0-7) following the chunk. */
static unsigned int drwav__chunk_padding_size_w64(drwav_uint64 chunkSize)
{
    return (unsigned int)(chunkSize & 7);
}

static drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
static drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
static drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);

/*
Reads the next chunk header from the stream into pHeaderOut and advances
*pRunningBytesReadOut by the number of header bytes consumed.

RIFF/RF64 headers are a 4-byte fourcc followed by a 4-byte size; Wave64 headers are a
16-byte GUID followed by an 8-byte size that *includes* the 24-byte header itself.

Returns DRWAV_AT_END when the id cannot be read (clean end of stream), and
DRWAV_INVALID_FILE when the size field is truncated or, for w64, smaller than the
header it is supposed to include.
*/
static drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_chunk_header* pHeaderOut)
{
    if (container == drwav_container_riff || container == drwav_container_rf64) {
        drwav_uint8 sizeInBytes[4];

        if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) {
            return DRWAV_AT_END;
        }

        if (onRead(pUserData, sizeInBytes, 4) != 4) {
            return DRWAV_INVALID_FILE;
        }

        pHeaderOut->sizeInBytes = drwav__bytes_to_u32(sizeInBytes);
        pHeaderOut->paddingSize = drwav__chunk_padding_size_riff(pHeaderOut->sizeInBytes);
        *pRunningBytesReadOut += 8;
    } else {
        drwav_uint8 sizeInBytes[8];
        drwav_uint64 chunkSize;

        if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) {
            return DRWAV_AT_END;
        }

        if (onRead(pUserData, sizeInBytes, 8) != 8) {
            return DRWAV_INVALID_FILE;
        }

        /*
        The w64 size includes the 24-byte header. A corrupt size smaller than that would
        make the subtraction below wrap around to an enormous value, so reject it.
        */
        chunkSize = drwav__bytes_to_u64(sizeInBytes);
        if (chunkSize < 24) {
            return DRWAV_INVALID_FILE;
        }

        pHeaderOut->sizeInBytes = chunkSize - 24;
        pHeaderOut->paddingSize = drwav__chunk_padding_size_w64(pHeaderOut->sizeInBytes);
        *pRunningBytesReadOut += 24;
    }

    return DRWAV_SUCCESS;
}

/*
Seeks forward by offset bytes relative to the current position. onSeek() only takes a
32-bit signed offset, so large seeks are split into 0x7FFFFFFF-byte steps.
*/
static drwav_bool32 drwav__seek_forward(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData)
{
    drwav_uint64 remaining;

    for (remaining = offset; remaining > 0x7FFFFFFF; remaining -= 0x7FFFFFFF) {
        if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) {
            return DRWAV_FALSE;
        }
    }

    if (remaining > 0) {
        if (!onSeek(pUserData, (int)remaining, drwav_seek_origin_current)) {
            return DRWAV_FALSE;
        }
    }

    return DRWAV_TRUE;
}

/*
Seeks to an absolute offset. Offsets beyond 31 bits are performed as one absolute seek
followed by relative steps, since onSeek() only takes a 32-bit signed offset.
*/
static drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData)
{
    drwav_seek_origin origin = drwav_seek_origin_start;

    for (;;) {
        if (offset <= 0x7FFFFFFF) {
            return onSeek(pUserData, (int)offset, origin);   /* Final (possibly only) step. */
        }

        if (!onSeek(pUserData, 0x7FFFFFFF, origin)) {
            return DRWAV_FALSE;
        }

        offset -= 0x7FFFFFFF;
        origin = drwav_seek_origin_current;   /* All steps after the first are relative. */
    }
}


/*
Scans the stream for the "fmt " chunk, decodes it into fmtOut, and leaves the stream
positioned immediately after the chunk (including any pad byte). Any non-fmt chunks
encountered first are skipped. *pRunningBytesReadOut is advanced by every byte read or
skipped. Returns DRWAV_FALSE on read/seek failure or if the chunk is malformed.
*/
static drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_fmt* fmtOut)
{
    drwav_chunk_header header;
    drwav_uint8 fmt[16];    /* The fixed 16-byte portion of the fmt chunk. */

    if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }


    /* Skip non-fmt chunks. */
    while (((container == drwav_container_riff || container == drwav_container_rf64) && !drwav__fourcc_equal(header.id.fourcc, "fmt ")) || (container == drwav_container_w64 && !drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT))) {
        if (!drwav__seek_forward(onSeek, header.sizeInBytes + header.paddingSize, pUserData)) {
            return DRWAV_FALSE;
        }
        *pRunningBytesReadOut += header.sizeInBytes + header.paddingSize;

        /* Try the next header. */
        if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) {
            return DRWAV_FALSE;
        }
    }


    /* Validation. The loop above can only exit on a fmt chunk for the matching container, so this re-checks for safety. */
    if (container == drwav_container_riff || container == drwav_container_rf64) {
        if (!drwav__fourcc_equal(header.id.fourcc, "fmt ")) {
            return DRWAV_FALSE;
        }
    } else {
        if (!drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT)) {
            return DRWAV_FALSE;
        }
    }


    if (onRead(pUserData, fmt, sizeof(fmt)) != sizeof(fmt)) {
        return DRWAV_FALSE;
    }
    *pRunningBytesReadOut += sizeof(fmt);

    /* Decode the fixed portion (all fields little endian). */
    fmtOut->formatTag      = drwav__bytes_to_u16(fmt + 0);
    fmtOut->channels       = drwav__bytes_to_u16(fmt + 2);
    fmtOut->sampleRate     = drwav__bytes_to_u32(fmt + 4);
    fmtOut->avgBytesPerSec = drwav__bytes_to_u32(fmt + 8);
    fmtOut->blockAlign     = drwav__bytes_to_u16(fmt + 12);
    fmtOut->bitsPerSample  = drwav__bytes_to_u16(fmt + 14);

    /* Extended fields default to zero when absent. */
    fmtOut->extendedSize       = 0;
    fmtOut->validBitsPerSample = 0;
    fmtOut->channelMask        = 0;
    memset(fmtOut->subFormat, 0, sizeof(fmtOut->subFormat));

    if (header.sizeInBytes > 16) {
        drwav_uint8 fmt_cbSize[2];
        int bytesReadSoFar = 0;

        if (onRead(pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) {
            return DRWAV_FALSE;    /* Expecting more data. */
        }
        *pRunningBytesReadOut += sizeof(fmt_cbSize);

        bytesReadSoFar = 18;

        fmtOut->extendedSize = drwav__bytes_to_u16(fmt_cbSize);
        if (fmtOut->extendedSize > 0) {
            /* Simple validation. WAVE_FORMAT_EXTENSIBLE requires exactly 22 extension bytes. */
            if (fmtOut->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) {
                if (fmtOut->extendedSize != 22) {
                    return DRWAV_FALSE;
                }
            }

            if (fmtOut->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) {
                drwav_uint8 fmtext[22];
                if (onRead(pUserData, fmtext, fmtOut->extendedSize) != fmtOut->extendedSize) {
                    return DRWAV_FALSE;    /* Expecting more data. */
                }

                fmtOut->validBitsPerSample = drwav__bytes_to_u16(fmtext + 0);
                fmtOut->channelMask        = drwav__bytes_to_u32(fmtext + 2);
                drwav__bytes_to_guid(fmtext + 6, fmtOut->subFormat);
            } else {
                /* Unknown extension payload; skip it. */
                if (!onSeek(pUserData, fmtOut->extendedSize, drwav_seek_origin_current)) {
                    return DRWAV_FALSE;
                }
            }
            *pRunningBytesReadOut += fmtOut->extendedSize;

            bytesReadSoFar += fmtOut->extendedSize;
        }

        /* Seek past any leftover bytes. For w64 the leftover will be defined based on the chunk size. */
        /* NOTE(review): if sizeInBytes is 17 (between 16 and bytesReadSoFar=18) this difference goes negative — presumably such a chunk never occurs in practice; verify against malformed-file handling. */
        if (!onSeek(pUserData, (int)(header.sizeInBytes - bytesReadSoFar), drwav_seek_origin_current)) {
            return DRWAV_FALSE;
        }
        *pRunningBytesReadOut += (header.sizeInBytes - bytesReadSoFar);
    }

    /* Consume the alignment pad byte(s) so the stream sits at the next chunk header. */
    if (header.paddingSize > 0) {
        if (!onSeek(pUserData, header.paddingSize, drwav_seek_origin_current)) {
            return DRWAV_FALSE;
        }
        *pRunningBytesReadOut += header.paddingSize;
    }

    return DRWAV_TRUE;
}


/* Wraps onRead, advancing *pCursor by the number of bytes actually read. */
static size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor)
{
    size_t bytesJustRead;

    DRWAV_ASSERT(onRead  != NULL);
    DRWAV_ASSERT(pCursor != NULL);

    bytesJustRead = onRead(pUserData, pBufferOut, bytesToRead);
    *pCursor += bytesJustRead;

    return bytesJustRead;
}

#if 0
/* Currently unused: wraps onSeek, keeping *pCursor in sync with the new stream position. */
static drwav_bool32 drwav__on_seek(drwav_seek_proc onSeek, void* pUserData, int offset, drwav_seek_origin origin, drwav_uint64* pCursor)
{
    DRWAV_ASSERT(onSeek != NULL);
    DRWAV_ASSERT(pCursor != NULL);

    if (!onSeek(pUserData, offset, origin)) {
        return DRWAV_FALSE;
    }

    /* An absolute seek replaces the cursor; a relative seek adjusts it. */
    if (origin == drwav_seek_origin_start) {
        *pCursor = offset;
    } else {
        *pCursor += offset;
    }

    return DRWAV_TRUE;
}
#endif



/*
Returns the size of one PCM frame in bytes.

The value is ambiguous in the wav format: it can be derived from bits-per-sample or
taken from the block align. When bits-per-sample is a whole number of bytes it is
computed as bytesPerSample * channels; otherwise the fmt chunk's block align is used.
*/
static drwav_uint32 drwav_get_bytes_per_pcm_frame(drwav* pWav)
{
    if ((pWav->bitsPerSample % 8) != 0) {
        return pWav->fmt.blockAlign;    /* Not byte-aligned; trust the block align. */
    }

    return (pWav->bitsPerSample / 8) * pWav->fmt.channels;
}

/*
Returns the effective format tag, resolving WAVE_FORMAT_EXTENSIBLE to the real tag
stored in the first two bytes of the sub-format GUID. Returns 0 when pFMT is NULL.
*/
DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT)
{
    drwav_uint16 format = 0;

    if (pFMT != NULL) {
        if (pFMT->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) {
            format = drwav__bytes_to_u16(pFMT->subFormat);    /* Only the first two bytes are required. */
        } else {
            format = pFMT->formatTag;
        }
    }

    return format;
}

/*
Zeroes the drwav object and wires up the read/seek callbacks and allocation callbacks.
Returns DRWAV_FALSE when a required argument is NULL or the resolved allocation
callbacks cannot both allocate and free.
*/
static drwav_bool32 drwav_preinit(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    drwav_bool32 canFree;
    drwav_bool32 canAllocate;

    if (pWav == NULL || onRead == NULL || onSeek == NULL) {
        return DRWAV_FALSE;
    }

    DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav));
    pWav->onRead    = onRead;
    pWav->onSeek    = onSeek;
    pWav->pUserData = pReadSeekUserData;
    pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);

    /* The callbacks must be able to both free and allocate. */
    canFree     = pWav->allocationCallbacks.onFree != NULL;
    canAllocate = pWav->allocationCallbacks.onMalloc != NULL || pWav->allocationCallbacks.onRealloc != NULL;
    if (!canFree || !canAllocate) {
        return DRWAV_FALSE;    /* Invalid allocation callbacks. */
    }

    return DRWAV_TRUE;
}

/*
Parses a WAV stream's container header and chunk list and fills out pWav's format, sample-count
and data-chunk fields. Supports RIFF, Wave64 (W64) and RF64 containers. Assumes drwav_preinit()
has been called beforehand. Returns DRWAV_FALSE for malformed or unsupported input. On success
the stream is positioned on the first byte of raw audio data (unless running sequentially, in
which case it is assumed to already be there).
*/
static drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
{
    /* This function assumes drwav_preinit() has been called beforehand. */

    drwav_uint64 cursor;    /* <-- Keeps track of the byte position so we can seek to specific locations. */
    drwav_bool32 sequential;
    drwav_uint8 riff[4];
    drwav_fmt fmt;
    unsigned short translatedFormatTag;
    drwav_bool32 foundDataChunk;
    drwav_uint64 dataChunkSize = 0; /* <-- Important! Don't explicitly set this to 0 anywhere else. Calculation of the size of the data chunk is performed in different paths depending on the container. */
    drwav_uint64 sampleCountFromFactChunk = 0;  /* Same as dataChunkSize - make sure this is the only place this is initialized to 0. */
    drwav_uint64 chunkSize;

    cursor = 0;
    sequential = (flags & DRWAV_SEQUENTIAL) != 0;   /* Sequential mode forbids backwards seeks. */

    /* The first 4 bytes should be the RIFF identifier. */
    if (drwav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) {
        return DRWAV_FALSE;
    }

    /*
    The first 4 bytes can be used to identify the container. For RIFF files it will start with "RIFF" and for
    w64 it will start with "riff" (the first 4 bytes of the W64 RIFF GUID).
    */
    if (drwav__fourcc_equal(riff, "RIFF")) {
        pWav->container = drwav_container_riff;
    } else if (drwav__fourcc_equal(riff, "riff")) {
        int i;
        drwav_uint8 riff2[12];

        pWav->container = drwav_container_w64;

        /* Check the rest of the GUID for validity. */
        if (drwav__on_read(pWav->onRead, pWav->pUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) {
            return DRWAV_FALSE;
        }

        for (i = 0; i < 12; ++i) {
            if (riff2[i] != drwavGUID_W64_RIFF[i+4]) {
                return DRWAV_FALSE;
            }
        }
    } else if (drwav__fourcc_equal(riff, "RF64")) {
        pWav->container = drwav_container_rf64;
    } else {
        return DRWAV_FALSE;   /* Unknown or unsupported container. */
    }


    if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) {
        drwav_uint8 chunkSizeBytes[4];
        drwav_uint8 wave[4];

        /* RIFF/WAVE: 32-bit chunk size followed by the "WAVE" form type. */
        if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) {
            return DRWAV_FALSE;
        }

        if (pWav->container == drwav_container_riff) {
            if (drwav__bytes_to_u32(chunkSizeBytes) < 36) {
                return DRWAV_FALSE;    /* Chunk size should always be at least 36 bytes. */
            }
        } else {
            if (drwav__bytes_to_u32(chunkSizeBytes) != 0xFFFFFFFF) {
                return DRWAV_FALSE;    /* Chunk size should always be set to -1/0xFFFFFFFF for RF64. The actual size is retrieved later. */
            }
        }

        if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) {
            return DRWAV_FALSE;
        }

        if (!drwav__fourcc_equal(wave, "WAVE")) {
            return DRWAV_FALSE;    /* Expecting "WAVE". */
        }
    } else {
        drwav_uint8 chunkSizeBytes[8];
        drwav_uint8 wave[16];

        /* W64: 64-bit chunk size followed by the 16-byte WAVE GUID. */
        if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) {
            return DRWAV_FALSE;
        }

        if (drwav__bytes_to_u64(chunkSizeBytes) < 80) {
            return DRWAV_FALSE;
        }

        if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) {
            return DRWAV_FALSE;
        }

        if (!drwav__guid_equal(wave, drwavGUID_W64_WAVE)) {
            return DRWAV_FALSE;
        }
    }


    /* For RF64, the "ds64" chunk must come next, before the "fmt " chunk. It holds the true 64-bit RIFF/data/sample-count sizes. */
    if (pWav->container == drwav_container_rf64) {
        drwav_uint8 sizeBytes[8];
        drwav_uint64 bytesRemainingInChunk;
        drwav_chunk_header header;
        drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header);
        if (result != DRWAV_SUCCESS) {
            return DRWAV_FALSE;
        }

        if (!drwav__fourcc_equal(header.id.fourcc, "ds64")) {
            return DRWAV_FALSE; /* Expecting "ds64". */
        }

        bytesRemainingInChunk = header.sizeInBytes + header.paddingSize;

        /* We don't care about the size of the RIFF chunk - skip it. */
        if (!drwav__seek_forward(pWav->onSeek, 8, pWav->pUserData)) {
            return DRWAV_FALSE;
        }
        bytesRemainingInChunk -= 8;
        cursor += 8;


        /* Next 8 bytes is the size of the "data" chunk. */
        if (drwav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) {
            return DRWAV_FALSE;
        }
        bytesRemainingInChunk -= 8;
        dataChunkSize = drwav__bytes_to_u64(sizeBytes);


        /* Next 8 bytes is the sample count, which we would usually derive from the FACT chunk if it was available. */
        if (drwav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) {
            return DRWAV_FALSE;
        }
        bytesRemainingInChunk -= 8;
        sampleCountFromFactChunk = drwav__bytes_to_u64(sizeBytes);


        /* Skip over everything else (the optional ds64 table entries). */
        if (!drwav__seek_forward(pWav->onSeek, bytesRemainingInChunk, pWav->pUserData)) {
            return DRWAV_FALSE;
        }
        cursor += bytesRemainingInChunk;
    }


    /* The next bytes should be the "fmt " chunk. */
    if (!drwav__read_fmt(pWav->onRead, pWav->onSeek, pWav->pUserData, pWav->container, &cursor, &fmt)) {
        return DRWAV_FALSE;    /* Failed to read the "fmt " chunk. */
    }

    /* Basic validation. */
    if ((fmt.sampleRate    == 0 || fmt.sampleRate    > DRWAV_MAX_SAMPLE_RATE)     ||
        (fmt.channels      == 0 || fmt.channels      > DRWAV_MAX_CHANNELS)        ||
        (fmt.bitsPerSample == 0 || fmt.bitsPerSample > DRWAV_MAX_BITS_PER_SAMPLE) ||
        fmt.blockAlign == 0) {
        return DRWAV_FALSE; /* Probably an invalid WAV file. */
    }


    /* Translate the internal format. For WAVE_FORMAT_EXTENSIBLE the real tag is in the first two bytes of the sub-format GUID. */
    translatedFormatTag = fmt.formatTag;
    if (translatedFormatTag == DR_WAVE_FORMAT_EXTENSIBLE) {
        translatedFormatTag = drwav__bytes_to_u16(fmt.subFormat + 0);
    }


    /*
    We need to enumerate over each chunk for two reasons:
      1) The "data" chunk may not be the next one
      2) We may want to report each chunk back to the client
    
    In order to correctly report each chunk back to the client we will need to keep looping until the end of the file.
    */
    foundDataChunk = DRWAV_FALSE;

    /* The next chunk we care about is the "data" chunk. This is not necessarily the next chunk so we'll need to loop. */
    for (;;)
    {
        drwav_chunk_header header;
        drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header);
        if (result != DRWAV_SUCCESS) {
            if (!foundDataChunk) {
                return DRWAV_FALSE;
            } else {
                break;  /* Probably at the end of the file. Get out of the loop. */
            }
        }

        /* Tell the client about this chunk. */
        if (!sequential && onChunk != NULL) {
            drwav_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header, pWav->container, &fmt);

            /*
            dr_wav may need to read the contents of the chunk, so we now need to seek back to the position before
            we called the callback.
            */
            if (callbackBytesRead > 0) {
                if (!drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData)) {
                    return DRWAV_FALSE;
                }
            }
        }
        

        /* Keep dataChunkDataPos tracking the current chunk start until the data chunk has been found. */
        if (!foundDataChunk) {
            pWav->dataChunkDataPos = cursor;
        }

        chunkSize = header.sizeInBytes;
        if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) {
            if (drwav__fourcc_equal(header.id.fourcc, "data")) {
                foundDataChunk = DRWAV_TRUE;
                if (pWav->container != drwav_container_rf64) {  /* The data chunk size for RF64 will always be set to 0xFFFFFFFF here. It was set to its true value earlier, from the ds64 chunk. */
                    dataChunkSize = chunkSize;
                }
            }
        } else {
            if (drwav__guid_equal(header.id.guid, drwavGUID_W64_DATA)) {
                foundDataChunk = DRWAV_TRUE;
                dataChunkSize = chunkSize;
            }
        }

        /*
        If at this point we have found the data chunk and we're running in sequential mode, we need to break out of this loop. The reason for
        this is that we would otherwise require a backwards seek which sequential mode forbids.
        */
        if (foundDataChunk && sequential) {
            break;
        }

        /* Optional. Get the total sample count from the FACT chunk. This is useful for compressed formats. */
        if (pWav->container == drwav_container_riff) {
            if (drwav__fourcc_equal(header.id.fourcc, "fact")) {
                drwav_uint32 sampleCount;
                /* NOTE(review): read straight into the integer with no byte-order conversion, unlike drwav__bytes_to_u32 used elsewhere - presumably assumes a little-endian host; confirm for big-endian targets. */
                if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) {
                    return DRWAV_FALSE;
                }
                chunkSize -= 4;

                if (!foundDataChunk) {
                    pWav->dataChunkDataPos = cursor;
                }

                /*
                The sample count in the "fact" chunk is either unreliable, or I'm not understanding it properly. For now I am only enabling this
                for Microsoft ADPCM formats.
                */
                if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
                    sampleCountFromFactChunk = sampleCount;
                } else {
                    sampleCountFromFactChunk = 0;
                }
            }
        } else if (pWav->container == drwav_container_w64) {
            if (drwav__guid_equal(header.id.guid, drwavGUID_W64_FACT)) {
                if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) {
                    return DRWAV_FALSE;
                }
                chunkSize -= 8;

                if (!foundDataChunk) {
                    pWav->dataChunkDataPos = cursor;
                }
            }
        } else if (pWav->container == drwav_container_rf64) {
            /* We retrieved the sample count from the ds64 chunk earlier so no need to do that here. */
        }

        /* "smpl" chunk. Optional sampler metadata (loop points etc.). */
        if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) {
            if (drwav__fourcc_equal(header.id.fourcc, "smpl")) {
                drwav_uint8 smplHeaderData[36];    /* 36 = size of the smpl header section, not including the loop data. */
                if (chunkSize >= sizeof(smplHeaderData)) {
                    drwav_uint64 bytesJustRead = drwav__on_read(pWav->onRead, pWav->pUserData, smplHeaderData, sizeof(smplHeaderData), &cursor);
                    chunkSize -= bytesJustRead;

                    if (bytesJustRead == sizeof(smplHeaderData)) {
                        drwav_uint32 iLoop;

                        pWav->smpl.manufacturer      = drwav__bytes_to_u32(smplHeaderData+0);
                        pWav->smpl.product           = drwav__bytes_to_u32(smplHeaderData+4);
                        pWav->smpl.samplePeriod      = drwav__bytes_to_u32(smplHeaderData+8);
                        pWav->smpl.midiUnityNotes    = drwav__bytes_to_u32(smplHeaderData+12);
                        pWav->smpl.midiPitchFraction = drwav__bytes_to_u32(smplHeaderData+16);
                        pWav->smpl.smpteFormat       = drwav__bytes_to_u32(smplHeaderData+20);
                        pWav->smpl.smpteOffset       = drwav__bytes_to_u32(smplHeaderData+24);
                        pWav->smpl.numSampleLoops    = drwav__bytes_to_u32(smplHeaderData+28);
                        pWav->smpl.samplerData       = drwav__bytes_to_u32(smplHeaderData+32);

                        /* Read loop entries, capped at the fixed capacity of pWav->smpl.loops. */
                        for (iLoop = 0; iLoop < pWav->smpl.numSampleLoops && iLoop < drwav_countof(pWav->smpl.loops); ++iLoop) {
                            drwav_uint8 smplLoopData[24];  /* 24 = size of a loop section in the smpl chunk. */
                            bytesJustRead = drwav__on_read(pWav->onRead, pWav->pUserData, smplLoopData, sizeof(smplLoopData), &cursor);
                            chunkSize -= bytesJustRead;

                            if (bytesJustRead == sizeof(smplLoopData)) {
                                pWav->smpl.loops[iLoop].cuePointId = drwav__bytes_to_u32(smplLoopData+0);
                                pWav->smpl.loops[iLoop].type       = drwav__bytes_to_u32(smplLoopData+4);
                                pWav->smpl.loops[iLoop].start      = drwav__bytes_to_u32(smplLoopData+8);
                                pWav->smpl.loops[iLoop].end        = drwav__bytes_to_u32(smplLoopData+12);
                                pWav->smpl.loops[iLoop].fraction   = drwav__bytes_to_u32(smplLoopData+16);
                                pWav->smpl.loops[iLoop].playCount  = drwav__bytes_to_u32(smplLoopData+20);
                            } else {
                                break;  /* Break from the smpl loop for loop. */
                            }
                        }
                    }
                } else {
                    /* Looks like invalid data. Ignore the chunk. */
                }
            }
        } else {
            if (drwav__guid_equal(header.id.guid, drwavGUID_W64_SMPL)) {
                /*
                This path will be hit when a W64 WAV file contains a smpl chunk. I don't have a sample file to test this path, so a contribution
                is welcome to add support for this.
                */
            }
        }

        /* Make sure we seek past the padding. chunkSize has been decremented by whatever was already consumed above. */
        chunkSize += header.paddingSize;
        if (!drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData)) {
            break;
        }
        cursor += chunkSize;

        if (!foundDataChunk) {
            pWav->dataChunkDataPos = cursor;
        }
    }

    /* If we haven't found a data chunk, return an error. */
    if (!foundDataChunk) {
        return DRWAV_FALSE;
    }

    /* We may have moved past the data chunk. If so we need to move back. If running in sequential mode we can assume we are already sitting on the data chunk. */
    if (!sequential) {
        if (!drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) {
            return DRWAV_FALSE;
        }
        cursor = pWav->dataChunkDataPos;
    }
    

    /* At this point we should be sitting on the first byte of the raw audio data. */

    pWav->fmt                 = fmt;
    pWav->sampleRate          = fmt.sampleRate;
    pWav->channels            = fmt.channels;
    pWav->bitsPerSample       = fmt.bitsPerSample;
    pWav->bytesRemaining      = dataChunkSize;
    pWav->translatedFormatTag = translatedFormatTag;
    pWav->dataChunkDataSize   = dataChunkSize;

    /* Derive the total PCM frame count: prefer the fact/ds64 count, otherwise compute from the data size. */
    if (sampleCountFromFactChunk != 0) {
        pWav->totalPCMFrameCount = sampleCountFromFactChunk;
    } else {
        pWav->totalPCMFrameCount = dataChunkSize / drwav_get_bytes_per_pcm_frame(pWav);

        if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
            drwav_uint64 totalBlockHeaderSizeInBytes;
            drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign;

            /* Make sure any trailing partial block is accounted for. */
            if ((blockCount * fmt.blockAlign) < dataChunkSize) {
                blockCount += 1;
            }

            /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */
            totalBlockHeaderSizeInBytes = blockCount * (6*fmt.channels);
            pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels;
        }
        if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
            drwav_uint64 totalBlockHeaderSizeInBytes;
            drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign;

            /* Make sure any trailing partial block is accounted for. */
            if ((blockCount * fmt.blockAlign) < dataChunkSize) {
                blockCount += 1;
            }

            /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */
            totalBlockHeaderSizeInBytes = blockCount * (4*fmt.channels);
            pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels;

            /* The header includes a decoded sample for each channel which acts as the initial predictor sample. */
            pWav->totalPCMFrameCount += blockCount;
        }
    }

    /* Some formats only support a certain number of channels. */
    if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
        if (pWav->channels > 2) {
            return DRWAV_FALSE;
        }
    }

#ifdef DR_WAV_LIBSNDFILE_COMPAT
    /*
    I use libsndfile as a benchmark for testing, however in the version I'm using (from the Windows installer on the libsndfile website),
    it appears the total sample count libsndfile uses for MS-ADPCM is incorrect. It would seem they are computing the total sample count
    from the number of blocks, however this results in the inclusion of extra silent samples at the end of the last block. The correct
    way to know the total sample count is to inspect the "fact" chunk, which should always be present for compressed formats, and should
    always include the sample count. This little block of code below is only used to emulate the libsndfile logic so I can properly run my
    correctness tests against libsndfile, and is disabled by default.
    */
    if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
        drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign;
        pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (6*pWav->channels))) * 2)) / fmt.channels;  /* x2 because two samples per byte. */
    }
    if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
        drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign;
        pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (4*pWav->channels))) * 2) + (blockCount * pWav->channels)) / fmt.channels;
    }
#endif

    return DRWAV_TRUE;
}

/* Convenience initializer: same as drwav_init_ex() with no chunk callback and no flags. */
DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    drwav_bool32 result;
    result = drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0, pAllocationCallbacks);
    return result;
}

/* Full initializer: validates arguments and wires callbacks via drwav_preinit(), then parses the container. */
DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (drwav_preinit(pWav, onRead, onSeek, pReadSeekUserData, pAllocationCallbacks) == DRWAV_FALSE) {
        return DRWAV_FALSE;
    }

    return drwav_init__internal(pWav, onChunk, pChunkUserData, flags);
}


static drwav_uint32 drwav__riff_chunk_size_riff(drwav_uint64 dataChunkSize)
{
    /* RIFF payload = "WAVE" id (4) + "fmt " chunk (24) + data chunk + pad byte, clamped to the 32-bit size field. */
    drwav_uint64 size = 4 + 24 + dataChunkSize + drwav__chunk_padding_size_riff(dataChunkSize);
    return (size > 0xFFFFFFFFUL) ? (drwav_uint32)0xFFFFFFFFUL : (drwav_uint32)size;
}

static drwav_uint32 drwav__data_chunk_size_riff(drwav_uint64 dataChunkSize)
{
    /* Clamp the 64-bit size to what a 32-bit RIFF size field can express. */
    if (dataChunkSize > 0xFFFFFFFFUL) {
        return 0xFFFFFFFFUL;
    }
    return (drwav_uint32)dataChunkSize;
}

static drwav_uint64 drwav__riff_chunk_size_w64(drwav_uint64 dataChunkSize)
{
    /* 80 = W64 header overhead; +24 because W64 chunk sizes include the 16-byte GUID and 8-byte size fields themselves. */
    return 80 + 24 + dataChunkSize + drwav__chunk_padding_size_w64(dataChunkSize);
}

static drwav_uint64 drwav__data_chunk_size_w64(drwav_uint64 dataChunkSize)
{
    /* W64 chunk sizes include the 16-byte GUID and the 8-byte size field (24 bytes total). */
    return dataChunkSize + 24;
}

static drwav_uint64 drwav__riff_chunk_size_rf64(drwav_uint64 dataChunkSize)
{
    /* RF64 payload = "WAVE" id (4) + "ds64" chunk (36) + "fmt " chunk (24) + data chunk + pad byte, clamped to 32 bits. */
    drwav_uint64 size = 4 + 36 + 24 + dataChunkSize + drwav__chunk_padding_size_riff(dataChunkSize);
    return (size > 0xFFFFFFFFUL) ? 0xFFFFFFFFUL : size;
}

static drwav_uint64 drwav__data_chunk_size_rf64(drwav_uint64 dataChunkSize)
{
    /* RF64 stores the true 64-bit size in the "ds64" chunk, so the value passes through unchanged. */
    return dataChunkSize;
}


static size_t drwav__write(drwav* pWav, const void* pData, size_t dataSize)
{
    /* Raw pass-through to the user's write callback. Assumes no byte reordering is required. */
    DRWAV_ASSERT(pWav          != NULL);
    DRWAV_ASSERT(pWav->onWrite != NULL);

    return pWav->onWrite(pWav->pUserData, pData, dataSize);
}

/* Writes a native-endian 16-bit value as little-endian, swapping first on big-endian hosts. */
static size_t drwav__write_u16ne_to_le(drwav* pWav, drwav_uint16 value)
{
    drwav_uint16 le = value;

    DRWAV_ASSERT(pWav          != NULL);
    DRWAV_ASSERT(pWav->onWrite != NULL);

    if (!drwav__is_little_endian()) {
        le = drwav__bswap16(le);
    }

    return drwav__write(pWav, &le, 2);
}

/* Writes a native-endian 32-bit value as little-endian, swapping first on big-endian hosts. */
static size_t drwav__write_u32ne_to_le(drwav* pWav, drwav_uint32 value)
{
    drwav_uint32 le = value;

    DRWAV_ASSERT(pWav          != NULL);
    DRWAV_ASSERT(pWav->onWrite != NULL);

    if (!drwav__is_little_endian()) {
        le = drwav__bswap32(le);
    }

    return drwav__write(pWav, &le, 4);
}

/* Writes a native-endian 64-bit value as little-endian, swapping first on big-endian hosts. */
static size_t drwav__write_u64ne_to_le(drwav* pWav, drwav_uint64 value)
{
    drwav_uint64 le = value;

    DRWAV_ASSERT(pWav          != NULL);
    DRWAV_ASSERT(pWav->onWrite != NULL);

    if (!drwav__is_little_endian()) {
        le = drwav__bswap64(le);
    }

    return drwav__write(pWav, &le, 8);
}


/*
Validates write arguments, zeroes the drwav struct, wires up the write/seek callbacks and
derives the fmt-chunk fields from pFormat. Must run before drwav_init_write__internal().
*/
static drwav_bool32 drwav_preinit_write(drwav* pWav, const drwav_data_format* pFormat, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pWav == NULL || onWrite == NULL) {
        return DRWAV_FALSE;
    }

    /* Non-sequential writing patches sizes with backwards seeks, so onSeek is mandatory in that mode. */
    if (onSeek == NULL && !isSequential) {
        return DRWAV_FALSE;
    }

    /* Extensible and compressed formats are not supported for writing. Compressed formats would need "fact" chunk support first. */
    if (pFormat->format == DR_WAVE_FORMAT_EXTENSIBLE ||
        pFormat->format == DR_WAVE_FORMAT_ADPCM      ||
        pFormat->format == DR_WAVE_FORMAT_DVI_ADPCM) {
        return DRWAV_FALSE;
    }

    DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav));
    pWav->onWrite   = onWrite;
    pWav->onSeek    = onSeek;
    pWav->pUserData = pUserData;
    pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);

    /* onFree plus at least one of onMalloc/onRealloc is required. */
    if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) {
        return DRWAV_FALSE;
    }

    /* Derive the fmt chunk fields from the requested data format. */
    pWav->fmt.formatTag      = (drwav_uint16)pFormat->format;
    pWav->fmt.channels       = (drwav_uint16)pFormat->channels;
    pWav->fmt.sampleRate     = pFormat->sampleRate;
    pWav->fmt.avgBytesPerSec = (drwav_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8);
    pWav->fmt.blockAlign     = (drwav_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8);
    pWav->fmt.bitsPerSample  = (drwav_uint16)pFormat->bitsPerSample;
    pWav->fmt.extendedSize   = 0;
    pWav->isSequentialWrite  = isSequential;

    return DRWAV_TRUE;
}

/*
Emits the container header (RIFF/W64/RF64 chunk, optional ds64, "fmt " chunk, and the "data"
chunk header) for a new WAV file. Assumes drwav_preinit_write() was called beforehand. In
sequential mode all size fields are written with their final values computed from
totalSampleCount; otherwise they are written as placeholders and patched later in
drwav_uninit() via a backwards seek.
*/
static drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
{
    /* The function assumes drwav_preinit_write() was called beforehand. */

    size_t runningPos = 0;
    drwav_uint64 initialDataChunkSize = 0;
    drwav_uint64 chunkSizeFMT;

    /*
    The initial values for the "RIFF" and "data" chunks depends on whether or not we are initializing in sequential mode or not. In
    sequential mode we set this to its final values straight away since they can be calculated from the total sample count. In non-
    sequential mode we initialize it all to zero and fill it out in drwav_uninit() using a backwards seek.
    */
    if (pWav->isSequentialWrite) {
        initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8;

        /*
        The RIFF container has a limit on the number of samples. drwav is not allowing this. There's no practical limits for Wave64
        so for the sake of simplicity I'm not doing any validation for that.
        */
        if (pFormat->container == drwav_container_riff) {
            if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) {
                return DRWAV_FALSE; /* Not enough room to store every sample. */
            }
        }
    }

    pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize;


    /* "RIFF" chunk. */
    if (pFormat->container == drwav_container_riff) {
        drwav_uint32 chunkSizeRIFF = 28 + (drwav_uint32)initialDataChunkSize;   /* +28 = "WAVE" + [sizeof "fmt " chunk] */
        runningPos += drwav__write(pWav, "RIFF", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, chunkSizeRIFF);
        runningPos += drwav__write(pWav, "WAVE", 4);
    } else if (pFormat->container == drwav_container_w64) {
        drwav_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize;            /* +24 because W64 includes the size of the GUID and size fields. */
        runningPos += drwav__write(pWav, drwavGUID_W64_RIFF, 16);
        runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeRIFF);
        runningPos += drwav__write(pWav, drwavGUID_W64_WAVE, 16);
    } else if (pFormat->container == drwav_container_rf64) {
        runningPos += drwav__write(pWav, "RF64", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF);               /* Always 0xFFFFFFFF for RF64. Set to a proper value in the "ds64" chunk. */
        runningPos += drwav__write(pWav, "WAVE", 4);
    }

    
    /* "ds64" chunk (RF64 only). Holds the true 64-bit RIFF/data/sample-count sizes. */
    if (pFormat->container == drwav_container_rf64) {
        drwav_uint32 initialds64ChunkSize = 28;                                 /* 28 = [Size of RIFF (8 bytes)] + [Size of DATA (8 bytes)] + [Sample Count (8 bytes)] + [Table Length (4 bytes)]. Table length always set to 0. */
        drwav_uint64 initialRiffChunkSize = 8 + initialds64ChunkSize + initialDataChunkSize;    /* +8 for the ds64 header. */

        runningPos += drwav__write(pWav, "ds64", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, initialds64ChunkSize);     /* Size of ds64. */
        runningPos += drwav__write_u64ne_to_le(pWav, initialRiffChunkSize);     /* Size of RIFF. Set to true value at the end. */
        runningPos += drwav__write_u64ne_to_le(pWav, initialDataChunkSize);     /* Size of DATA. Set to true value at the end. */
        runningPos += drwav__write_u64ne_to_le(pWav, totalSampleCount);         /* Sample count. */
        runningPos += drwav__write_u32ne_to_le(pWav, 0);                        /* Table length. Always set to zero in our case since we're not doing any other chunks than "DATA". */
    }


    /* "fmt " chunk. 16 bytes for plain RIFF/RF64; 40 for W64 (W64 sizes include the 24-byte GUID+size header). */
    if (pFormat->container == drwav_container_riff || pFormat->container == drwav_container_rf64) {
        chunkSizeFMT = 16;
        runningPos += drwav__write(pWav, "fmt ", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, (drwav_uint32)chunkSizeFMT);
    } else if (pFormat->container == drwav_container_w64) {
        chunkSizeFMT = 40;
        runningPos += drwav__write(pWav, drwavGUID_W64_FMT, 16);
        runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeFMT);
    }

    /* fmt chunk payload, in WAVEFORMAT field order. */
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.formatTag);
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.channels);
    runningPos += drwav__write_u32ne_to_le(pWav, pWav->fmt.sampleRate);
    runningPos += drwav__write_u32ne_to_le(pWav, pWav->fmt.avgBytesPerSec);
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.blockAlign);
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.bitsPerSample);

    /* Remember where the data chunk header starts so drwav_uninit() can seek back and patch the size. */
    pWav->dataChunkDataPos = runningPos;

    /* "data" chunk. */
    if (pFormat->container == drwav_container_riff) {
        drwav_uint32 chunkSizeDATA = (drwav_uint32)initialDataChunkSize;
        runningPos += drwav__write(pWav, "data", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, chunkSizeDATA);
    } else if (pFormat->container == drwav_container_w64) {
        drwav_uint64 chunkSizeDATA = 24 + initialDataChunkSize;     /* +24 because W64 includes the size of the GUID and size fields. */
        runningPos += drwav__write(pWav, drwavGUID_W64_DATA, 16);
        runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeDATA);
    } else if (pFormat->container == drwav_container_rf64) {
        runningPos += drwav__write(pWav, "data", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF);   /* Always set to 0xFFFFFFFF for RF64. The true size of the data chunk is specified in the ds64 chunk. */
    }

    /*
    The runningPos variable is incremented in the section above but is left unused which is causing some static analysis tools to detect it
    as a dead store. I'm leaving this as-is for safety just in case I want to expand this function later to include other tags and want to
    keep track of the running position for whatever reason. The line below should silence the static analysis tools.
    */
    (void)runningPos;

    /* Set some properties for the client's convenience. */
    pWav->container = pFormat->container;
    pWav->channels = (drwav_uint16)pFormat->channels;
    pWav->sampleRate = pFormat->sampleRate;
    pWav->bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
    pWav->translatedFormatTag = (drwav_uint16)pFormat->format;

    return DRWAV_TRUE;
}


/*
Initializes a non-sequential WAV writer. Size fields in the header are written as placeholders
and patched in drwav_uninit() via a backwards seek, which is why onSeek is required here.
*/
DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (!drwav_preinit_write(pWav, pFormat, DRWAV_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) {  /* DRWAV_FALSE = not sequential. */
        return DRWAV_FALSE;
    }

    /* Total sample count is unknown up front in non-sequential mode, so pass 0. The original trailing
       comment mislabeled this argument as the sequential flag. */
    return drwav_init_write__internal(pWav, pFormat, 0);
}

/*
Initializes a sequential WAV writer. All header size fields are computed up front from
totalSampleCount, so no seek callback is needed (NULL is passed for onSeek).
*/
DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (drwav_preinit_write(pWav, pFormat, DRWAV_TRUE, onWrite, NULL, pUserData, pAllocationCallbacks) == DRWAV_FALSE) {  /* DRWAV_TRUE = sequential. */
        return DRWAV_FALSE;
    }

    return drwav_init_write__internal(pWav, pFormat, totalSampleCount);
}

/* PCM-frame variant of drwav_init_write_sequential(): one frame = one sample per channel. */
DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pFormat == NULL) {
        return DRWAV_FALSE;
    }

    /* Convert frames to samples before delegating. */
    return drwav_init_write_sequential(pWav, pFormat, totalPCMFrameCount*pFormat->channels, onWrite, pUserData, pAllocationCallbacks);
}

/* Predicts the total on-disk file size for the given data format and sample count. */
DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
{
    /* Casting totalSampleCount to drwav_int64 for VC6 compatibility. No issues in practice because nobody is going to exhaust the whole 63 bits. */
    drwav_uint64 targetDataSizeBytes = (drwav_uint64)((drwav_int64)totalSampleCount * pFormat->channels * pFormat->bitsPerSample/8.0);
    drwav_uint64 fileSizeBytes = 0;

    if (pFormat->container == drwav_container_riff) {
        /* +8 because the RIFF chunk size excludes the ChunkID and ChunkSize fields. */
        fileSizeBytes = 8 + drwav__riff_chunk_size_riff(targetDataSizeBytes);
    } else if (pFormat->container == drwav_container_w64) {
        /* W64 chunk sizes already include their own header bytes. */
        fileSizeBytes = drwav__riff_chunk_size_w64(targetDataSizeBytes);
    } else if (pFormat->container == drwav_container_rf64) {
        /* Same +8 rule as plain RIFF. */
        fileSizeBytes = 8 + drwav__riff_chunk_size_rf64(targetDataSizeBytes);
    }

    return fileSizeBytes;
}


#ifndef DR_WAV_NO_STDIO

/* drwav_result_from_errno() is only used for fopen() and wfopen() so putting it inside DR_WAV_NO_STDIO for now. If something else needs this later we can move it out. */
#include <errno.h>
static drwav_result drwav_result_from_errno(int e)
{
    switch (e)
    {
        case 0: return DRWAV_SUCCESS;
    #ifdef EPERM
        case EPERM: return DRWAV_INVALID_OPERATION;
    #endif
    #ifdef ENOENT
        case ENOENT: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef ESRCH
        case ESRCH: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef EINTR
        case EINTR: return DRWAV_INTERRUPT;
    #endif
    #ifdef EIO
        case EIO: return DRWAV_IO_ERROR;
    #endif
    #ifdef ENXIO
        case ENXIO: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef E2BIG
        case E2BIG: return DRWAV_INVALID_ARGS;
    #endif
    #ifdef ENOEXEC
        case ENOEXEC: return DRWAV_INVALID_FILE;
    #endif
    #ifdef EBADF
        case EBADF: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ECHILD
        case ECHILD: return DRWAV_ERROR;
    #endif
    #ifdef EAGAIN
        case EAGAIN: return DRWAV_UNAVAILABLE;
    #endif
    #ifdef ENOMEM
        case ENOMEM: return DRWAV_OUT_OF_MEMORY;
    #endif
    #ifdef EACCES
        case EACCES: return DRWAV_ACCESS_DENIED;
    #endif
    #ifdef EFAULT
        case EFAULT: return DRWAV_BAD_ADDRESS;
    #endif
    #ifdef ENOTBLK
        case ENOTBLK: return DRWAV_ERROR;
    #endif
    #ifdef EBUSY
        case EBUSY: return DRWAV_BUSY;
    #endif
    #ifdef EEXIST
        case EEXIST: return DRWAV_ALREADY_EXISTS;
    #endif
    #ifdef EXDEV
        case EXDEV: return DRWAV_ERROR;
    #endif
    #ifdef ENODEV
        case ENODEV: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef ENOTDIR
        case ENOTDIR: return DRWAV_NOT_DIRECTORY;
    #endif
    #ifdef EISDIR
        case EISDIR: return DRWAV_IS_DIRECTORY;
    #endif
    #ifdef EINVAL
        case EINVAL: return DRWAV_INVALID_ARGS;
    #endif
    #ifdef ENFILE
        case ENFILE: return DRWAV_TOO_MANY_OPEN_FILES;
    #endif
    #ifdef EMFILE
        case EMFILE: return DRWAV_TOO_MANY_OPEN_FILES;
    #endif
    #ifdef ENOTTY
        case ENOTTY: return DRWAV_INVALID_OPERATION;
    #endif
    #ifdef ETXTBSY
        case ETXTBSY: return DRWAV_BUSY;
    #endif
    #ifdef EFBIG
        case EFBIG: return DRWAV_TOO_BIG;
    #endif
    #ifdef ENOSPC
        case ENOSPC: return DRWAV_NO_SPACE;
    #endif
    #ifdef ESPIPE
        case ESPIPE: return DRWAV_BAD_SEEK;
    #endif
    #ifdef EROFS
        case EROFS: return DRWAV_ACCESS_DENIED;
    #endif
    #ifdef EMLINK
        case EMLINK: return DRWAV_TOO_MANY_LINKS;
    #endif
    #ifdef EPIPE
        case EPIPE: return DRWAV_BAD_PIPE;
    #endif
    #ifdef EDOM
        case EDOM: return DRWAV_OUT_OF_RANGE;
    #endif
    #ifdef ERANGE
        case ERANGE: return DRWAV_OUT_OF_RANGE;
    #endif
    #ifdef EDEADLK
        case EDEADLK: return DRWAV_DEADLOCK;
    #endif
    #ifdef ENAMETOOLONG
        case ENAMETOOLONG: return DRWAV_PATH_TOO_LONG;
    #endif
    #ifdef ENOLCK
        case ENOLCK: return DRWAV_ERROR;
    #endif
    #ifdef ENOSYS
        case ENOSYS: return DRWAV_NOT_IMPLEMENTED;
    #endif
    #ifdef ENOTEMPTY
        case ENOTEMPTY: return DRWAV_DIRECTORY_NOT_EMPTY;
    #endif
    #ifdef ELOOP
        case ELOOP: return DRWAV_TOO_MANY_LINKS;
    #endif
    #ifdef ENOMSG
        case ENOMSG: return DRWAV_NO_MESSAGE;
    #endif
    #ifdef EIDRM
        case EIDRM: return DRWAV_ERROR;
    #endif
    #ifdef ECHRNG
        case ECHRNG: return DRWAV_ERROR;
    #endif
    #ifdef EL2NSYNC
        case EL2NSYNC: return DRWAV_ERROR;
    #endif
    #ifdef EL3HLT
        case EL3HLT: return DRWAV_ERROR;
    #endif
    #ifdef EL3RST
        case EL3RST: return DRWAV_ERROR;
    #endif
    #ifdef ELNRNG
        case ELNRNG: return DRWAV_OUT_OF_RANGE;
    #endif
    #ifdef EUNATCH
        case EUNATCH: return DRWAV_ERROR;
    #endif
    #ifdef ENOCSI
        case ENOCSI: return DRWAV_ERROR;
    #endif
    #ifdef EL2HLT
        case EL2HLT: return DRWAV_ERROR;
    #endif
    #ifdef EBADE
        case EBADE: return DRWAV_ERROR;
    #endif
    #ifdef EBADR
        case EBADR: return DRWAV_ERROR;
    #endif
    #ifdef EXFULL
        case EXFULL: return DRWAV_ERROR;
    #endif
    #ifdef ENOANO
        case ENOANO: return DRWAV_ERROR;
    #endif
    #ifdef EBADRQC
        case EBADRQC: return DRWAV_ERROR;
    #endif
    #ifdef EBADSLT
        case EBADSLT: return DRWAV_ERROR;
    #endif
    #ifdef EBFONT
        case EBFONT: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ENOSTR
        case ENOSTR: return DRWAV_ERROR;
    #endif
    #ifdef ENODATA
        case ENODATA: return DRWAV_NO_DATA_AVAILABLE;
    #endif
    #ifdef ETIME
        case ETIME: return DRWAV_TIMEOUT;
    #endif
    #ifdef ENOSR
        case ENOSR: return DRWAV_NO_DATA_AVAILABLE;
    #endif
    #ifdef ENONET
        case ENONET: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ENOPKG
        case ENOPKG: return DRWAV_ERROR;
    #endif
    #ifdef EREMOTE
        case EREMOTE: return DRWAV_ERROR;
    #endif
    #ifdef ENOLINK
        case ENOLINK: return DRWAV_ERROR;
    #endif
    #ifdef EADV
        case EADV: return DRWAV_ERROR;
    #endif
    #ifdef ESRMNT
        case ESRMNT: return DRWAV_ERROR;
    #endif
    #ifdef ECOMM
        case ECOMM: return DRWAV_ERROR;
    #endif
    #ifdef EPROTO
        case EPROTO: return DRWAV_ERROR;
    #endif
    #ifdef EMULTIHOP
        case EMULTIHOP: return DRWAV_ERROR;
    #endif
    #ifdef EDOTDOT
        case EDOTDOT: return DRWAV_ERROR;
    #endif
    #ifdef EBADMSG
        case EBADMSG: return DRWAV_BAD_MESSAGE;
    #endif
    #ifdef EOVERFLOW
        case EOVERFLOW: return DRWAV_TOO_BIG;
    #endif
    #ifdef ENOTUNIQ
        case ENOTUNIQ: return DRWAV_NOT_UNIQUE;
    #endif
    #ifdef EBADFD
        case EBADFD: return DRWAV_ERROR;
    #endif
    #ifdef EREMCHG
        case EREMCHG: return DRWAV_ERROR;
    #endif
    #ifdef ELIBACC
        case ELIBACC: return DRWAV_ACCESS_DENIED;
    #endif
    #ifdef ELIBBAD
        case ELIBBAD: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ELIBSCN
        case ELIBSCN: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ELIBMAX
        case ELIBMAX: return DRWAV_ERROR;
    #endif
    #ifdef ELIBEXEC
        case ELIBEXEC: return DRWAV_ERROR;
    #endif
    #ifdef EILSEQ
        case EILSEQ: return DRWAV_INVALID_DATA;
    #endif
    #ifdef ERESTART
        case ERESTART: return DRWAV_ERROR;
    #endif
    #ifdef ESTRPIPE
        case ESTRPIPE: return DRWAV_ERROR;
    #endif
    #ifdef EUSERS
        case EUSERS: return DRWAV_ERROR;
    #endif
    #ifdef ENOTSOCK
        case ENOTSOCK: return DRWAV_NOT_SOCKET;
    #endif
    #ifdef EDESTADDRREQ
        case EDESTADDRREQ: return DRWAV_NO_ADDRESS;
    #endif
    #ifdef EMSGSIZE
        case EMSGSIZE: return DRWAV_TOO_BIG;
    #endif
    #ifdef EPROTOTYPE
        case EPROTOTYPE: return DRWAV_BAD_PROTOCOL;
    #endif
    #ifdef ENOPROTOOPT
        case ENOPROTOOPT: return DRWAV_PROTOCOL_UNAVAILABLE;
    #endif
    #ifdef EPROTONOSUPPORT
        case EPROTONOSUPPORT: return DRWAV_PROTOCOL_NOT_SUPPORTED;
    #endif
    #ifdef ESOCKTNOSUPPORT
        case ESOCKTNOSUPPORT: return DRWAV_SOCKET_NOT_SUPPORTED;
    #endif
    #ifdef EOPNOTSUPP
        case EOPNOTSUPP: return DRWAV_INVALID_OPERATION;
    #endif
    #ifdef EPFNOSUPPORT
        case EPFNOSUPPORT: return DRWAV_PROTOCOL_FAMILY_NOT_SUPPORTED;
    #endif
    #ifdef EAFNOSUPPORT
        case EAFNOSUPPORT: return DRWAV_ADDRESS_FAMILY_NOT_SUPPORTED;
    #endif
    #ifdef EADDRINUSE
        case EADDRINUSE: return DRWAV_ALREADY_IN_USE;
    #endif
    #ifdef EADDRNOTAVAIL
        case EADDRNOTAVAIL: return DRWAV_ERROR;
    #endif
    #ifdef ENETDOWN
        case ENETDOWN: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ENETUNREACH
        case ENETUNREACH: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ENETRESET
        case ENETRESET: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ECONNABORTED
        case ECONNABORTED: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ECONNRESET
        case ECONNRESET: return DRWAV_CONNECTION_RESET;
    #endif
    #ifdef ENOBUFS
        case ENOBUFS: return DRWAV_NO_SPACE;
    #endif
    #ifdef EISCONN
        case EISCONN: return DRWAV_ALREADY_CONNECTED;
    #endif
    #ifdef ENOTCONN
        case ENOTCONN: return DRWAV_NOT_CONNECTED;
    #endif
    #ifdef ESHUTDOWN
        case ESHUTDOWN: return DRWAV_ERROR;
    #endif
    #ifdef ETOOMANYREFS
        case ETOOMANYREFS: return DRWAV_ERROR;
    #endif
    #ifdef ETIMEDOUT
        case ETIMEDOUT: return DRWAV_TIMEOUT;
    #endif
    #ifdef ECONNREFUSED
        case ECONNREFUSED: return DRWAV_CONNECTION_REFUSED;
    #endif
    #ifdef EHOSTDOWN
        case EHOSTDOWN: return DRWAV_NO_HOST;
    #endif
    #ifdef EHOSTUNREACH
        case EHOSTUNREACH: return DRWAV_NO_HOST;
    #endif
    #ifdef EALREADY
        case EALREADY: return DRWAV_IN_PROGRESS;
    #endif
    #ifdef EINPROGRESS
        case EINPROGRESS: return DRWAV_IN_PROGRESS;
    #endif
    #ifdef ESTALE
        case ESTALE: return DRWAV_INVALID_FILE;
    #endif
    #ifdef EUCLEAN
        case EUCLEAN: return DRWAV_ERROR;
    #endif
    #ifdef ENOTNAM
        case ENOTNAM: return DRWAV_ERROR;
    #endif
    #ifdef ENAVAIL
        case ENAVAIL: return DRWAV_ERROR;
    #endif
    #ifdef EISNAM
        case EISNAM: return DRWAV_ERROR;
    #endif
    #ifdef EREMOTEIO
        case EREMOTEIO: return DRWAV_IO_ERROR;
    #endif
    #ifdef EDQUOT
        case EDQUOT: return DRWAV_NO_SPACE;
    #endif
    #ifdef ENOMEDIUM
        case ENOMEDIUM: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef EMEDIUMTYPE
        case EMEDIUMTYPE: return DRWAV_ERROR;
    #endif
    #ifdef ECANCELED
        case ECANCELED: return DRWAV_CANCELLED;
    #endif
    #ifdef ENOKEY
        case ENOKEY: return DRWAV_ERROR;
    #endif
    #ifdef EKEYEXPIRED
        case EKEYEXPIRED: return DRWAV_ERROR;
    #endif
    #ifdef EKEYREVOKED
        case EKEYREVOKED: return DRWAV_ERROR;
    #endif
    #ifdef EKEYREJECTED
        case EKEYREJECTED: return DRWAV_ERROR;
    #endif
    #ifdef EOWNERDEAD
        case EOWNERDEAD: return DRWAV_ERROR;
    #endif
    #ifdef ENOTRECOVERABLE
        case ENOTRECOVERABLE: return DRWAV_ERROR;
    #endif
    #ifdef ERFKILL
        case ERFKILL: return DRWAV_ERROR;
    #endif
    #ifdef EHWPOISON
        case EHWPO
Download .txt
gitextract_5181guli/

├── .github/
│   └── workflows/
│       └── build.yml
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── README.md
├── convert.py
├── decoder.h
├── encodec.cpp
├── encodec.h
├── encoder.h
├── examples/
│   ├── CMakeLists.txt
│   ├── README.md
│   ├── common.cpp
│   ├── common.h
│   ├── compress/
│   │   ├── CMakeLists.txt
│   │   └── main.cpp
│   ├── decompress/
│   │   ├── CMakeLists.txt
│   │   └── main.cpp
│   ├── dr_wav.h
│   ├── json.hpp
│   └── main/
│       ├── CMakeLists.txt
│       └── main.cpp
├── lstm.h
├── ops.cpp
├── ops.h
├── quantizer.h
└── utils.h
Download .txt
SYMBOL INDEX (818 symbols across 16 files)

FILE: convert.py
  function parse_codec_model (line 46) | def parse_codec_model(checkpoint, outfile, use_f16):
  function parse_hparams (line 121) | def parse_hparams(outfile, use_f16):

FILE: decoder.h
  type encodec_decoder_block (line 13) | struct encodec_decoder_block {
  type encodec_decoder (line 31) | struct encodec_decoder {
  type ggml_tensor (line 43) | struct ggml_tensor
  type encodec_decoder (line 44) | struct encodec_decoder
  type ggml_context (line 44) | struct ggml_context
  type ggml_tensor (line 45) | struct ggml_tensor
  type ggml_tensor (line 53) | struct ggml_tensor
  type ggml_tensor (line 58) | struct ggml_tensor
  type ggml_tensor (line 64) | struct ggml_tensor
  type ggml_tensor (line 69) | struct ggml_tensor
  type ggml_tensor (line 84) | struct ggml_tensor
  type ggml_tensor (line 87) | struct ggml_tensor
  type ggml_tensor (line 109) | struct ggml_tensor

FILE: encodec.cpp
  type encodec_hparams (line 47) | struct encodec_hparams {
  type encodec_model (line 83) | struct encodec_model {
    type ggml_context (line 91) | struct ggml_context
    type ggml_tensor (line 98) | struct ggml_tensor
  type encodec_ggml_cgraph_deleter (line 101) | struct encodec_ggml_cgraph_deleter {
    type ggml_cgraph (line 102) | struct ggml_cgraph
  type encodec_context (line 115) | struct encodec_context {
    type ggml_cgraph (line 121) | struct ggml_cgraph
    type ggml_tensor (line 130) | struct ggml_tensor
    type ggml_tensor (line 131) | struct ggml_tensor
    type ggml_tensor (line 132) | struct ggml_tensor
  function encodec_load_model_weights (line 141) | bool encodec_load_model_weights(std::ifstream &infile, encodec_model &mo...
  type ggml_cgraph (line 507) | struct ggml_cgraph
  type ggml_cgraph (line 508) | struct ggml_cgraph
  type ggml_cgraph (line 508) | struct ggml_cgraph
  type ggml_cgraph (line 508) | struct ggml_cgraph
  type ggml_tensor (line 512) | struct ggml_tensor
  type ggml_tensor (line 512) | struct ggml_tensor
  type ggml_tensor (line 513) | struct ggml_tensor
  type ggml_tensor (line 513) | struct ggml_tensor
  type ggml_tensor (line 539) | struct ggml_tensor
  type ggml_tensor (line 539) | struct ggml_tensor
  function encodec_build_graph (line 546) | void encodec_build_graph(struct encodec_context *ectx,
  function encodec_build_graph (line 630) | void encodec_build_graph(struct encodec_context *ectx, const int32_t *co...
  function encodec_zero_tensor (line 706) | static void encodec_zero_tensor(struct ggml_cgraph *gf, const char *name) {
  function encodec_eval_internal (line 711) | bool encodec_eval_internal(struct encodec_context *ectx, const float * r...
  function encodec_eval_internal (line 753) | bool encodec_eval_internal(struct encodec_context *ectx, const int32_t *...
  function encodec_eval (line 789) | bool encodec_eval(struct encodec_context *ectx, const float *raw_audio,
  function encodec_eval (line 819) | bool encodec_eval(struct encodec_context *ectx, const int32_t *codes,
  function encodec_reconstruct_audio (line 849) | bool encodec_reconstruct_audio(struct encodec_context *ectx, const float...
  function encodec_compress_audio (line 878) | bool encodec_compress_audio(struct encodec_context *ectx, const float *r...
  function encodec_decompress_audio (line 902) | bool encodec_decompress_audio(struct encodec_context *ectx, const int32_...
  type encodec_context (line 933) | struct encodec_context
    type ggml_cgraph (line 121) | struct ggml_cgraph
    type ggml_tensor (line 130) | struct ggml_tensor
    type ggml_tensor (line 131) | struct ggml_tensor
    type ggml_tensor (line 132) | struct ggml_tensor
  type encodec_context (line 946) | struct encodec_context
    type ggml_cgraph (line 121) | struct ggml_cgraph
    type ggml_tensor (line 130) | struct ggml_tensor
    type ggml_tensor (line 131) | struct ggml_tensor
    type ggml_tensor (line 132) | struct ggml_tensor
  function encodec_free (line 972) | void encodec_free(struct encodec_context *ectx) {
  function encodec_set_target_bandwidth (line 991) | void encodec_set_target_bandwidth(struct encodec_context *ectx, int band...
  function encodec_set_sample_rate (line 995) | void encodec_set_sample_rate(struct encodec_context *ectx, int sample_ra...
  type encodec_statistics (line 999) | struct encodec_statistics
  type encodec_context (line 999) | struct encodec_context
    type ggml_cgraph (line 121) | struct ggml_cgraph
    type ggml_tensor (line 130) | struct ggml_tensor
    type ggml_tensor (line 131) | struct ggml_tensor
    type ggml_tensor (line 132) | struct ggml_tensor
  function encodec_reset_statistics (line 1007) | void encodec_reset_statistics(struct encodec_context *ectx) {
  type encodec_context (line 1015) | struct encodec_context
    type ggml_cgraph (line 121) | struct ggml_cgraph
    type ggml_tensor (line 130) | struct ggml_tensor
    type ggml_tensor (line 131) | struct ggml_tensor
    type ggml_tensor (line 132) | struct ggml_tensor
  function encodec_get_audio_size (line 1023) | int encodec_get_audio_size(struct encodec_context *ectx) {
  type encodec_context (line 1031) | struct encodec_context
    type ggml_cgraph (line 121) | struct ggml_cgraph
    type ggml_tensor (line 130) | struct ggml_tensor
    type ggml_tensor (line 131) | struct ggml_tensor
    type ggml_tensor (line 132) | struct ggml_tensor
  function encodec_get_codes_size (line 1039) | int encodec_get_codes_size(struct encodec_context *ectx) {

FILE: encodec.h
  type encodec_context (line 34) | struct encodec_context
  type encodec_statistics (line 36) | struct encodec_statistics {
  type encodec_context (line 51) | struct encodec_context
  type encodec_context (line 63) | struct encodec_context
  type encodec_context (line 73) | struct encodec_context
  type encodec_context (line 86) | struct encodec_context
  type encodec_context (line 101) | struct encodec_context
  type encodec_context (line 116) | struct encodec_context
  type encodec_context (line 128) | struct encodec_context
  type encodec_context (line 137) | struct encodec_context
  type encodec_context (line 146) | struct encodec_context
  type encodec_context (line 155) | struct encodec_context
  type encodec_statistics (line 163) | struct encodec_statistics
  type encodec_context (line 164) | struct encodec_context
  type encodec_context (line 172) | struct encodec_context
  type encodec_context (line 180) | struct encodec_context

FILE: encoder.h
  type encodec_encoder_block (line 9) | struct encodec_encoder_block {
  type encodec_encoder (line 27) | struct encodec_encoder {
  type ggml_tensor (line 39) | struct ggml_tensor
  type encodec_encoder (line 40) | struct encodec_encoder
  type ggml_context (line 40) | struct ggml_context
  type ggml_tensor (line 41) | struct ggml_tensor
  type ggml_tensor (line 49) | struct ggml_tensor
  type ggml_tensor (line 55) | struct ggml_tensor
  type ggml_tensor (line 58) | struct ggml_tensor
  type ggml_tensor (line 85) | struct ggml_tensor
  type ggml_tensor (line 91) | struct ggml_tensor
  type ggml_tensor (line 96) | struct ggml_tensor
  type ggml_tensor (line 105) | struct ggml_tensor

FILE: examples/common.cpp
  function swap_endianness (line 23) | uint32_t swap_endianness(uint32_t value) {
  function is_big_endian (line 31) | bool is_big_endian(void) {
  function encodec_print_usage (line 40) | void encodec_print_usage(char ** argv, const encodec_params & params) {
  function encodec_params_parse (line 56) | int encodec_params_parse(int argc, char ** argv, encodec_params & params) {
  function read_wav_from_disk (line 83) | bool read_wav_from_disk(std::string in_path, std::vector<float> & audio_...
  function write_wav_on_disk (line 106) | void write_wav_on_disk(std::vector<float> & audio_arr, std::string dest_...
  class BitPacker (line 122) | class BitPacker {
    method BitPacker (line 125) | BitPacker(int bits, std::ofstream& fo)
    method push (line 129) | void push(int value) {
    method flush (line 141) | void flush() {
  class BitUnpacker (line 157) | class BitUnpacker {
    method BitUnpacker (line 160) | BitUnpacker(int bits, std::ifstream& fo)
    method pull (line 164) | int pull() {
  function write_encodec_header (line 189) | void write_encodec_header(std::ofstream & fo, uint32_t audio_length) {
  function json (line 216) | json read_ecdc_header(std::ifstream & fin) {
  function write_encodec_codes (line 250) | void write_encodec_codes(
  function write_codes_to_file (line 262) | bool write_codes_to_file(
  function read_codes_from_file (line 276) | bool read_codes_from_file(

FILE: examples/compress/main.cpp
  function main (line 10) | int main(int argc, char **argv) {

FILE: examples/decompress/main.cpp
  function main (line 10) | int main(int argc, char **argv) {

FILE: examples/dr_wav.h
  type drwav_int8 (line 153) | typedef   signed char           drwav_int8;
  type drwav_uint8 (line 154) | typedef unsigned char           drwav_uint8;
  type drwav_int16 (line 155) | typedef   signed short          drwav_int16;
  type drwav_uint16 (line 156) | typedef unsigned short          drwav_uint16;
  type drwav_int32 (line 157) | typedef   signed int            drwav_int32;
  type drwav_uint32 (line 158) | typedef unsigned int            drwav_uint32;
  type drwav_int64 (line 160) | typedef   signed __int64    drwav_int64;
  type drwav_uint64 (line 161) | typedef unsigned __int64    drwav_uint64;
  type drwav_int64 (line 170) | typedef   signed long long  drwav_int64;
  type drwav_uint64 (line 171) | typedef unsigned long long  drwav_uint64;
  type drwav_uint64 (line 177) | typedef drwav_uint64        drwav_uintptr;
  type drwav_uint32 (line 179) | typedef drwav_uint32        drwav_uintptr;
  type drwav_uint8 (line 181) | typedef drwav_uint8             drwav_bool8;
  type drwav_uint32 (line 182) | typedef drwav_uint32            drwav_bool32;
  type drwav_int32 (line 216) | typedef drwav_int32 drwav_result;
  type drwav_seek_origin (line 292) | typedef enum
  type drwav_container (line 298) | typedef enum
  type drwav_chunk_header (line 305) | typedef struct
  type drwav_fmt (line 323) | typedef struct
  type drwav_bool32 (line 405) | typedef drwav_bool32 (* drwav_seek_proc)(void* pUserData, int offset, dr...
  type drwav_uint64 (line 431) | typedef drwav_uint64 (* drwav_chunk_proc)(void* pChunkUserData, drwav_re...
  type drwav_allocation_callbacks (line 433) | typedef struct
  type drwav__memory_stream (line 442) | typedef struct
  type drwav__memory_stream_write (line 450) | typedef struct
  type drwav_data_format (line 459) | typedef struct
  type drwav_smpl_loop (line 470) | typedef struct
  type drwav_smpl (line 480) | typedef struct
  type drwav (line 494) | typedef struct
  function DRWAV_API (line 1066) | DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor,...
  function DRWAV_API (line 1081) | DRWAV_API const char* drwav_version_string(void)
  function DRWAV_INLINE (line 1108) | static DRWAV_INLINE drwav_bool32 drwav__guid_equal(const drwav_uint8 a[1...
  function DRWAV_INLINE (line 1120) | static DRWAV_INLINE drwav_bool32 drwav__fourcc_equal(const drwav_uint8* ...
  function DRWAV_INLINE (line 1131) | static DRWAV_INLINE int drwav__is_little_endian(void)
  function DRWAV_INLINE (line 1143) | static DRWAV_INLINE drwav_uint16 drwav__bytes_to_u16(const drwav_uint8* ...
  function DRWAV_INLINE (line 1148) | static DRWAV_INLINE drwav_int16 drwav__bytes_to_s16(const drwav_uint8* d...
  function DRWAV_INLINE (line 1153) | static DRWAV_INLINE drwav_uint32 drwav__bytes_to_u32(const drwav_uint8* ...
  function DRWAV_INLINE (line 1158) | static DRWAV_INLINE drwav_int32 drwav__bytes_to_s32(const drwav_uint8* d...
  function DRWAV_INLINE (line 1163) | static DRWAV_INLINE drwav_uint64 drwav__bytes_to_u64(const drwav_uint8* ...
  function DRWAV_INLINE (line 1170) | static DRWAV_INLINE drwav_int64 drwav__bytes_to_s64(const drwav_uint8* d...
  function DRWAV_INLINE (line 1175) | static DRWAV_INLINE void drwav__bytes_to_guid(const drwav_uint8* data, d...
  function DRWAV_INLINE (line 1184) | static DRWAV_INLINE drwav_uint16 drwav__bswap16(drwav_uint16 n)
  function DRWAV_INLINE (line 1231) | static DRWAV_INLINE drwav_uint64 drwav__bswap64(drwav_uint64 n)
  function DRWAV_INLINE (line 1255) | static DRWAV_INLINE drwav_int16 drwav__bswap_s16(drwav_int16 n)
  function DRWAV_INLINE (line 1260) | static DRWAV_INLINE void drwav__bswap_samples_s16(drwav_int16* pSamples,...
  function DRWAV_INLINE (line 1269) | static DRWAV_INLINE void drwav__bswap_s24(drwav_uint8* p)
  function DRWAV_INLINE (line 1277) | static DRWAV_INLINE void drwav__bswap_samples_s24(drwav_uint8* pSamples,...
  function DRWAV_INLINE (line 1287) | static DRWAV_INLINE drwav_int32 drwav__bswap_s32(drwav_int32 n)
  function DRWAV_INLINE (line 1292) | static DRWAV_INLINE void drwav__bswap_samples_s32(drwav_int32* pSamples,...
  function DRWAV_INLINE (line 1301) | static DRWAV_INLINE float drwav__bswap_f32(float n)
  function DRWAV_INLINE (line 1313) | static DRWAV_INLINE void drwav__bswap_samples_f32(float* pSamples, drwav...
  function DRWAV_INLINE (line 1322) | static DRWAV_INLINE double drwav__bswap_f64(double n)
  function DRWAV_INLINE (line 1334) | static DRWAV_INLINE void drwav__bswap_samples_f64(double* pSamples, drwa...
  function DRWAV_INLINE (line 1343) | static DRWAV_INLINE void drwav__bswap_samples_pcm(void* pSamples, drwav_...
  function DRWAV_INLINE (line 1368) | static DRWAV_INLINE void drwav__bswap_samples_ieee(void* pSamples, drwav...
  function DRWAV_INLINE (line 1394) | static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint...
  function drwav__free_default (line 1437) | static void drwav__free_default(void* p, void* pUserData)
  function drwav__free_from_callbacks (line 1492) | static void drwav__free_from_callbacks(void* p, const drwav_allocation_c...
  function drwav_allocation_callbacks (line 1504) | static drwav_allocation_callbacks drwav_copy_allocation_callbacks_or_def...
  function DRWAV_INLINE (line 1521) | static DRWAV_INLINE drwav_bool32 drwav__is_compressed_format_tag(drwav_u...
  function drwav__chunk_padding_size_riff (line 1528) | static unsigned int drwav__chunk_padding_size_riff(drwav_uint64 chunkSize)
  function drwav__chunk_padding_size_w64 (line 1533) | static unsigned int drwav__chunk_padding_size_w64(drwav_uint64 chunkSize)
  function drwav_result (line 1542) | static drwav_result drwav__read_chunk_header(drwav_read_proc onRead, voi...
  function drwav_bool32 (line 1577) | static drwav_bool32 drwav__seek_forward(drwav_seek_proc onSeek, drwav_ui...
  function drwav_bool32 (line 1597) | static drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav...
  function drwav_bool32 (line 1625) | static drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_p...
  function drwav__on_read (line 1735) | static size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, vo...
  function drwav_bool32 (line 1748) | static drwav_bool32 drwav__on_seek(drwav_seek_proc onSeek, void* pUserDa...
  function drwav_uint32 (line 1769) | static drwav_uint32 drwav_get_bytes_per_pcm_frame(drwav* pWav)
  function DRWAV_API (line 1783) | DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT)
  function drwav_bool32 (line 1796) | static drwav_bool32 drwav_preinit(drwav* pWav, drwav_read_proc onRead, d...
  function drwav_bool32 (line 1815) | static drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc o...
  function DRWAV_API (line 2239) | DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, d...
  function DRWAV_API (line 2244) | DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead...
  function drwav_uint32 (line 2254) | static drwav_uint32 drwav__riff_chunk_size_riff(drwav_uint64 dataChunkSize)
  function drwav_uint32 (line 2264) | static drwav_uint32 drwav__data_chunk_size_riff(drwav_uint64 dataChunkSize)
  function drwav_uint64 (line 2273) | static drwav_uint64 drwav__riff_chunk_size_w64(drwav_uint64 dataChunkSize)
  function drwav_uint64 (line 2280) | static drwav_uint64 drwav__data_chunk_size_w64(drwav_uint64 dataChunkSize)
  function drwav_uint64 (line 2285) | static drwav_uint64 drwav__riff_chunk_size_rf64(drwav_uint64 dataChunkSize)
  function drwav_uint64 (line 2295) | static drwav_uint64 drwav__data_chunk_size_rf64(drwav_uint64 dataChunkSize)
  function drwav__write (line 2301) | static size_t drwav__write(drwav* pWav, const void* pData, size_t dataSize)
  function drwav__write_u16ne_to_le (line 2310) | static size_t drwav__write_u16ne_to_le(drwav* pWav, drwav_uint16 value)
  function drwav__write_u32ne_to_le (line 2322) | static size_t drwav__write_u32ne_to_le(drwav* pWav, drwav_uint32 value)
  function drwav__write_u64ne_to_le (line 2334) | static size_t drwav__write_u64ne_to_le(drwav* pWav, drwav_uint64 value)
  function drwav_bool32 (line 2347) | static drwav_bool32 drwav_preinit_write(drwav* pWav, const drwav_data_fo...
  function drwav_bool32 (line 2387) | static drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_...
  function DRWAV_API (line 2501) | DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_fo...
  function DRWAV_API (line 2510) | DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const dr...
  function DRWAV_API (line 2519) | DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWa...
  function DRWAV_API (line 2528) | DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_fo...
  function drwav_result (line 2554) | static drwav_result drwav_result_from_errno(int e)
  function drwav_result (line 2956) | static drwav_result drwav_fopen(FILE** ppFile, const char* pFilePath, co...
  function drwav_result (line 3016) | static drwav_result drwav_wfopen(FILE** ppFile, const wchar_t* pFilePath...
  function drwav__on_read_stdio (line 3099) | static size_t drwav__on_read_stdio(void* pUserData, void* pBufferOut, si...
  function drwav__on_write_stdio (line 3104) | static size_t drwav__on_write_stdio(void* pUserData, const void* pData, ...
  function drwav_bool32 (line 3109) | static drwav_bool32 drwav__on_seek_stdio(void* pUserData, int offset, dr...
  function DRWAV_API (line 3114) | DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename...
  function drwav_bool32 (line 3120) | static drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pF...
  function DRWAV_API (line 3139) | DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filen...
  function DRWAV_API (line 3150) | DRWAV_API drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* fil...
  function DRWAV_API (line 3155) | DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* ...
  function drwav_bool32 (line 3167) | static drwav_bool32 drwav_init_file_write__internal_FILE(drwav* pWav, FI...
  function drwav_bool32 (line 3186) | static drwav_bool32 drwav_init_file_write__internal(drwav* pWav, const c...
  function drwav_bool32 (line 3197) | static drwav_bool32 drwav_init_file_write_w__internal(drwav* pWav, const...
  function DRWAV_API (line 3208) | DRWAV_API drwav_bool32 drwav_init_file_write(drwav* pWav, const char* fi...
  function DRWAV_API (line 3213) | DRWAV_API drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, con...
  function DRWAV_API (line 3218) | DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav...
  function DRWAV_API (line 3227) | DRWAV_API drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_...
  function DRWAV_API (line 3232) | DRWAV_API drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, c...
  function DRWAV_API (line 3237) | DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drw...
  function drwav__on_read_memory (line 3248) | static size_t drwav__on_read_memory(void* pUserData, void* pBufferOut, s...
  function drwav_bool32 (line 3269) | static drwav_bool32 drwav__on_seek_memory(void* pUserData, int offset, d...
  function drwav__on_write_memory (line 3298) | static size_t drwav__on_write_memory(void* pUserData, const void* pDataI...
  function drwav_bool32 (line 3338) | static drwav_bool32 drwav__on_seek_memory_write(void* pUserData, int off...
  function DRWAV_API (line 3367) | DRWAV_API drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, ...
  function DRWAV_API (line 3372) | DRWAV_API drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* dat...
  function drwav_bool32 (line 3390) | static drwav_bool32 drwav_init_memory_write__internal(drwav* pWav, void*...
  function DRWAV_API (line 3412) | DRWAV_API drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppDat...
  function DRWAV_API (line 3417) | DRWAV_API drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, v...
  function DRWAV_API (line 3422) | DRWAV_API drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drw...
  function DRWAV_API (line 3433) | DRWAV_API drwav_result drwav_uninit(drwav* pWav)
  function DRWAV_API (line 3531) | DRWAV_API size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* p...
  function DRWAV_API (line 3585) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint6...
  function DRWAV_API (line 3621) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint6...
  function DRWAV_API (line 3632) | DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 f...
  function DRWAV_API (line 3643) | DRWAV_API drwav_bool32 drwav_seek_to_first_pcm_frame(drwav* pWav)
  function DRWAV_API (line 3670) | DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64...
  function DRWAV_API (line 3774) | DRWAV_API size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const...
  function DRWAV_API (line 3789) | DRWAV_API drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint...
  function DRWAV_API (line 3827) | DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint...
  function DRWAV_API (line 3883) | DRWAV_API drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 ...
  function drwav_uint64 (line 3893) | static drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwa...
  function drwav_uint64 (line 4073) | static drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_ui...
  function DRWAV_INLINE (line 4269) | static DRWAV_INLINE drwav_int16 drwav__alaw_to_s16(drwav_uint8 sampleIn)
  function DRWAV_INLINE (line 4274) | static DRWAV_INLINE drwav_int16 drwav__mulaw_to_s16(drwav_uint8 sampleIn)
  function drwav__pcm_to_s16 (line 4281) | static void drwav__pcm_to_s16(drwav_int16* pOut, const drwav_uint8* pIn,...
  function drwav__ieee_to_s16 (line 4333) | static void drwav__ieee_to_s16(drwav_int16* pOut, const drwav_uint8* pIn...
  function drwav_uint64 (line 4348) | static drwav_uint64 drwav_read_pcm_frames_s16__pcm(drwav* pWav, drwav_ui...
  function drwav_uint64 (line 4382) | static drwav_uint64 drwav_read_pcm_frames_s16__ieee(drwav* pWav, drwav_u...
  function drwav_uint64 (line 4415) | static drwav_uint64 drwav_read_pcm_frames_s16__alaw(drwav* pWav, drwav_u...
  function drwav_uint64 (line 4448) | static drwav_uint64 drwav_read_pcm_frames_s16__mulaw(drwav* pWav, drwav_...
  function DRWAV_API (line 4481) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint...
  function DRWAV_API (line 4523) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_ui...
  function DRWAV_API (line 4533) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_ui...
  function DRWAV_API (line 4544) | DRWAV_API void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn...
  function DRWAV_API (line 4556) | DRWAV_API void drwav_s24_to_s16(drwav_int16* pOut, const drwav_uint8* pI...
  function DRWAV_API (line 4567) | DRWAV_API void drwav_s32_to_s16(drwav_int16* pOut, const drwav_int32* pI...
  function DRWAV_API (line 4578) | DRWAV_API void drwav_f32_to_s16(drwav_int16* pOut, const float* pIn, siz...
  function DRWAV_API (line 4593) | DRWAV_API void drwav_f64_to_s16(drwav_int16* pOut, const double* pIn, si...
  function DRWAV_API (line 4608) | DRWAV_API void drwav_alaw_to_s16(drwav_int16* pOut, const drwav_uint8* p...
  function DRWAV_API (line 4616) | DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* ...
  function drwav__pcm_to_f32 (line 4626) | static void drwav__pcm_to_f32(float* pOut, const drwav_uint8* pIn, size_...
  function drwav__ieee_to_f32 (line 4675) | static void drwav__ieee_to_f32(float* pOut, const drwav_uint8* pIn, size...
  function drwav_uint64 (line 4694) | static drwav_uint64 drwav_read_pcm_frames_f32__pcm(drwav* pWav, drwav_ui...
  function drwav_uint64 (line 4722) | static drwav_uint64 drwav_read_pcm_frames_f32__msadpcm(drwav* pWav, drwa...
  function drwav_uint64 (line 4746) | static drwav_uint64 drwav_read_pcm_frames_f32__ima(drwav* pWav, drwav_ui...
  function drwav_uint64 (line 4770) | static drwav_uint64 drwav_read_pcm_frames_f32__ieee(drwav* pWav, drwav_u...
  function drwav_uint64 (line 4804) | static drwav_uint64 drwav_read_pcm_frames_f32__alaw(drwav* pWav, drwav_u...
  function drwav_uint64 (line 4831) | static drwav_uint64 drwav_read_pcm_frames_f32__mulaw(drwav* pWav, drwav_...
  function DRWAV_API (line 4859) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint...
  function DRWAV_API (line 4901) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_ui...
  function DRWAV_API (line 4911) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32be(drwav* pWav, drwav_ui...
  function DRWAV_API (line 4922) | DRWAV_API void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size...
  function DRWAV_API (line 4951) | DRWAV_API void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, siz...
  function DRWAV_API (line 4964) | DRWAV_API void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, siz...
  function DRWAV_API (line 4983) | DRWAV_API void drwav_s32_to_f32(float* pOut, const drwav_int32* pIn, siz...
  function DRWAV_API (line 4995) | DRWAV_API void drwav_f64_to_f32(float* pOut, const double* pIn, size_t s...
  function DRWAV_API (line 5008) | DRWAV_API void drwav_alaw_to_f32(float* pOut, const drwav_uint8* pIn, si...
  function DRWAV_API (line 5021) | DRWAV_API void drwav_mulaw_to_f32(float* pOut, const drwav_uint8* pIn, s...
  function drwav__pcm_to_s32 (line 5036) | static void drwav__pcm_to_s32(drwav_int32* pOut, const drwav_uint8* pIn,...
  function drwav__ieee_to_s32 (line 5087) | static void drwav__ieee_to_s32(drwav_int32* pOut, const drwav_uint8* pIn...
  function drwav_uint64 (line 5103) | static drwav_uint64 drwav_read_pcm_frames_s32__pcm(drwav* pWav, drwav_ui...
  function drwav_uint64 (line 5137) | static drwav_uint64 drwav_read_pcm_frames_s32__msadpcm(drwav* pWav, drwa...
  function drwav_uint64 (line 5161) | static drwav_uint64 drwav_read_pcm_frames_s32__ima(drwav* pWav, drwav_ui...
  function drwav_uint64 (line 5185) | static drwav_uint64 drwav_read_pcm_frames_s32__ieee(drwav* pWav, drwav_u...
  function drwav_uint64 (line 5213) | static drwav_uint64 drwav_read_pcm_frames_s32__alaw(drwav* pWav, drwav_u...
  function drwav_uint64 (line 5241) | static drwav_uint64 drwav_read_pcm_frames_s32__mulaw(drwav* pWav, drwav_...
  function DRWAV_API (line 5269) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint...
  function DRWAV_API (line 5311) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_ui...
  function DRWAV_API (line 5321) | DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_ui...
  function DRWAV_API (line 5332) | DRWAV_API void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn...
  function DRWAV_API (line 5345) | DRWAV_API void drwav_s16_to_s32(drwav_int32* pOut, const drwav_int16* pI...
  function DRWAV_API (line 5358) | DRWAV_API void drwav_s24_to_s32(drwav_int32* pOut, const drwav_uint8* pI...
  function DRWAV_API (line 5376) | DRWAV_API void drwav_f32_to_s32(drwav_int32* pOut, const float* pIn, siz...
  function DRWAV_API (line 5389) | DRWAV_API void drwav_f64_to_s32(drwav_int32* pOut, const double* pIn, si...
  function DRWAV_API (line 5402) | DRWAV_API void drwav_alaw_to_s32(drwav_int32* pOut, const drwav_uint8* p...
  function DRWAV_API (line 5415) | DRWAV_API void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* ...
  function drwav_int16 (line 5430) | static drwav_int16* drwav__read_pcm_frames_and_close_s16(drwav* pWav, un...
  function drwav_int32 (line 5514) | static drwav_int32* drwav__read_pcm_frames_and_close_s32(drwav* pWav, un...
  function DRWAV_API (line 5558) | DRWAV_API drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_pro...
  function DRWAV_API (line 5579) | DRWAV_API float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRe...
  function DRWAV_API (line 5600) | DRWAV_API drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_pro...
  function DRWAV_API (line 5622) | DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const cha...
  function DRWAV_API (line 5643) | DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32(const char* fil...
  function DRWAV_API (line 5664) | DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const cha...
  function DRWAV_API (line 5686) | DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const w...
  function DRWAV_API (line 5707) | DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t...
  function DRWAV_API (line 5728) | DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const w...
  function DRWAV_API (line 5750) | DRWAV_API drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const v...
  function DRWAV_API (line 5771) | DRWAV_API float* drwav_open_memory_and_read_pcm_frames_f32(const void* d...
  function DRWAV_API (line 5792) | DRWAV_API drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const v...
  function DRWAV_API (line 5815) | DRWAV_API void drwav_free(void* p, const drwav_allocation_callbacks* pAl...
  function DRWAV_API (line 5824) | DRWAV_API drwav_uint16 drwav_bytes_to_u16(const drwav_uint8* data)
  function DRWAV_API (line 5829) | DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data)
  function DRWAV_API (line 5834) | DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data)
  function DRWAV_API (line 5839) | DRWAV_API drwav_int32 drwav_bytes_to_s32(const drwav_uint8* data)
  function DRWAV_API (line 5844) | DRWAV_API drwav_uint64 drwav_bytes_to_u64(const drwav_uint8* data)
  function DRWAV_API (line 5849) | DRWAV_API drwav_int64 drwav_bytes_to_s64(const drwav_uint8* data)
  function DRWAV_API (line 5855) | DRWAV_API drwav_bool32 drwav_guid_equal(const drwav_uint8 a[16], const d...
  function DRWAV_API (line 5860) | DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const ch...

FILE: examples/json.hpp
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 247) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 258) | NLOHMANN_JSON_NAMESPACE_END
  type would_call_std_ (line 2808) | struct would_call_std_
  type value_t (line 2866) | enum class value_t : std::uint8_t
  function NLOHMANN_JSON_NAMESPACE_END (line 2931) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3024) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3070) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3260) | NLOHMANN_JSON_NAMESPACE_BEGIN
  class json_pointer (line 3409) | class json_pointer
  type ordered_map (line 3420) | struct ordered_map
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3431) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4166) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 4294) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_END (line 4526) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4572) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4580) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4595) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 5110) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 5292) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 5340) | NLOHMANN_JSON_NAMESPACE_BEGIN
  type adl_serializer (line 5768) | struct adl_serializer
    method from_json (line 5773) | static auto from_json(BasicJsonType && j, TargetType& val) noexcept(
    method from_json (line 5783) | static auto from_json(BasicJsonType && j) noexcept(
    method to_json (line 5793) | static auto to_json(BasicJsonType& j, TargetType && val) noexcept(
  function set_subtype (line 5874) | void set_subtype(subtype_type subtype_) noexcept
  function subtype_type (line 5882) | constexpr subtype_type subtype() const noexcept
  function has_subtype (line 5889) | constexpr bool has_subtype() const noexcept
  function clear_subtype (line 5896) | void clear_subtype() noexcept
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 5935) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 6105) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function json_sax_dom_parser (line 6747) | explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptio...
  function json_sax_dom_parser (line 6752) | json_sax_dom_parser(const json_sax_dom_parser&) = delete;
  function json_sax_dom_parser (line 6753) | json_sax_dom_parser(json_sax_dom_parser&&) = default;
  function null (line 6758) | bool null()
  function boolean (line 6764) | bool boolean(bool val)
  function number_integer (line 6770) | bool number_integer(number_integer_t val)
  function number_unsigned (line 6776) | bool number_unsigned(number_unsigned_t val)
  function number_float (line 6782) | bool number_float(number_float_t val, const string_t& /*unused*/)
  function string (line 6788) | bool string(string_t& val)
  function binary (line 6794) | bool binary(binary_t& val)
  function start_object (line 6800) | bool start_object(std::size_t len)
  function key (line 6812) | bool key(string_t& val)
  function end_object (line 6822) | bool end_object()
  function start_array (line 6832) | bool start_array(std::size_t len)
  function end_array (line 6844) | bool end_array()
  function parse_error (line 6855) | bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
  function is_errored (line 6867) | constexpr bool is_errored() const
  class json_sax_dom_callback_parser (line 6916) | class json_sax_dom_callback_parser
    method json_sax_dom_callback_parser (line 6927) | json_sax_dom_callback_parser(BasicJsonType& r,
    method json_sax_dom_callback_parser (line 6936) | json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = de...
    method json_sax_dom_callback_parser (line 6937) | json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default;
    method json_sax_dom_callback_parser (line 6938) | json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_pa...
    method json_sax_dom_callback_parser (line 6939) | json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&...
    method null (line 6942) | bool null()
    method boolean (line 6948) | bool boolean(bool val)
    method number_integer (line 6954) | bool number_integer(number_integer_t val)
    method number_unsigned (line 6960) | bool number_unsigned(number_unsigned_t val)
    method number_float (line 6966) | bool number_float(number_float_t val, const string_t& /*unused*/)
    method string (line 6972) | bool string(string_t& val)
    method binary (line 6978) | bool binary(binary_t& val)
    method start_object (line 6984) | bool start_object(std::size_t len)
    method key (line 7002) | bool key(string_t& val)
    method end_object (line 7019) | bool end_object()
    method start_array (line 7055) | bool start_array(std::size_t len)
    method end_array (line 7072) | bool end_array()
    method parse_error (line 7105) | bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
    method is_errored (line 7117) | constexpr bool is_errored() const
    method handle_value (line 7139) | std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool ski...
  class json_sax_acceptor (line 7223) | class json_sax_acceptor
    method null (line 7232) | bool null()
    method boolean (line 7237) | bool boolean(bool /*unused*/)
    method number_integer (line 7242) | bool number_integer(number_integer_t /*unused*/)
    method number_unsigned (line 7247) | bool number_unsigned(number_unsigned_t /*unused*/)
    method number_float (line 7252) | bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
    method string (line 7257) | bool string(string_t& /*unused*/)
    method binary (line 7262) | bool binary(binary_t& /*unused*/)
    method start_object (line 7267) | bool start_object(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
    method key (line 7272) | bool key(string_t& /*unused*/)
    method end_object (line 7277) | bool end_object()
    method start_array (line 7282) | bool start_array(std::size_t /*unused*/ = static_cast<std::size_t>(-1))
    method end_array (line 7287) | bool end_array()
    method parse_error (line 7292) | bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/...
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 7329) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function reset (line 8624) | void reset() noexcept
  function char_int_type (line 8641) | char_int_type get()
  function unget (line 8678) | void unget()
  function add (line 8705) | void add(char_int_type c)
  function number_unsigned_t (line 8722) | constexpr number_unsigned_t get_number_unsigned() const noexcept
  function number_float_t (line 8728) | constexpr number_float_t get_number_float() const noexcept
  function string_t (line 8734) | string_t& get_string()
  function position_t (line 8744) | constexpr position_t get_position() const noexcept
  function get_token_string (line 8752) | std::string get_token_string() const
  function JSON_HEDLEY_RETURNS_NON_NULL (line 8776) | JSON_HEDLEY_RETURNS_NON_NULL
  function skip_bom (line 8790) | bool skip_bom()
  function skip_whitespace (line 8804) | void skip_whitespace()
  function token_type (line 8813) | token_type scan()
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 8962) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 9102) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 12127) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 12639) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 12755) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 12810) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function pointer (line 13112) | pointer operator->() const
  function iter_impl (line 13154) | iter_impl operator++(int)& // NOLINT(cert-dcl21-cpp)
  function iter_impl (line 13165) | iter_impl& operator++()
  function iter_impl (line 13205) | iter_impl operator--(int)& // NOLINT(cert-dcl21-cpp)
  function iter_impl (line 13216) | iter_impl& operator--()
  function iter_impl (line 13364) | iter_impl& operator+=(difference_type i)
  function iter_impl (line 13401) | iter_impl& operator-=(difference_type i)
  function iter_impl (line 13410) | iter_impl operator+(difference_type i) const
  function friend (line 13421) | friend iter_impl operator+(difference_type i, const iter_impl& it)
  function iter_impl (line 13432) | iter_impl operator-(difference_type i) const
  function difference_type (line 13443) | difference_type operator-(const iter_impl& other) const
  function reference (line 13472) | reference operator[](difference_type n) const
  function reference (line 13526) | reference value() const
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 13561) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 13686) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 13747) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_BASIC_JSON_TPL_DECLARATION (line 13767) | NLOHMANN_BASIC_JSON_TPL_DECLARATION
  function json_pointer (line 13779) | explicit json_pointer(const string_t& s = "")
  function string_t (line 13785) | string_t to_string() const
  function friend (line 13806) | friend std::ostream& operator<<(std::ostream& o, const json_pointer& ptr)
  function json_pointer (line 13815) | json_pointer& operator/=(const json_pointer& ptr)
  function json_pointer (line 13825) | json_pointer& operator/=(string_t token)
  function json_pointer (line 13833) | json_pointer& operator/=(std::size_t array_idx)
  function friend (line 13840) | friend json_pointer operator/(const json_pointer& lhs,
  function friend (line 13848) | friend json_pointer operator/(const json_pointer& lhs, string_t token) /...
  function friend (line 13855) | friend json_pointer operator/(const json_pointer& lhs, std::size_t array...
  function json_pointer (line 13862) | json_pointer parent_pointer() const
  function pop_back (line 13876) | void pop_back()
  function string_t (line 13888) | const string_t& back() const
  function push_back (line 13900) | void push_back(const string_t& token)
  function push_back (line 13907) | void push_back(string_t&& token)
  function empty (line 13914) | bool empty() const noexcept
  function BasicJsonType (line 13991) | BasicJsonType& get_and_create(BasicJsonType& j) const
  function BasicJsonType (line 14071) | BasicJsonType& get_unchecked(BasicJsonType* ptr) const
  function BasicJsonType (line 14139) | BasicJsonType& get_checked(BasicJsonType* ptr) const
  function BasicJsonType (line 14197) | const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
  function BasicJsonType (line 14246) | const BasicJsonType& get_checked(const BasicJsonType* ptr) const
  function contains (line 14295) | bool contains(const BasicJsonType* ptr) const
  function split (line 14383) | static std::vector<string_t> split(const string_t& reference_string)
  function BasicJsonType (line 14523) | static BasicJsonType
  function convert (line 14552) | json_pointer<string_t> convert() const&
  function convert (line 14559) | json_pointer<string_t> convert()&&
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 14726) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 14851) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 14973) | NLOHMANN_JSON_NAMESPACE_END
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 16840) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function NLOHMANN_JSON_NAMESPACE_END (line 17937) | NLOHMANN_JSON_NAMESPACE_END
  function hex_bytes (line 18597) | static std::string hex_bytes(std::uint8_t byte)
  function is_negative_number (line 18608) | bool is_negative_number(NumberType x)
  function is_negative_number (line 18614) | bool is_negative_number(NumberType /*unused*/)
  function dump_integer (line 18634) | void dump_integer(NumberType x)
  function dump_float (line 18719) | void dump_float(number_float_t x)
  function dump_float (line 18740) | void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_dou...
  function dump_float (line 18748) | void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_do...
  function decode (line 18820) | static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, co...
  function number_unsigned_t (line 18860) | number_unsigned_t remove_sign(number_unsigned_t x)
  function number_unsigned_t (line 18875) | inline number_unsigned_t remove_sign(number_integer_t x) noexcept
  function ordered_map (line 18962) | ordered_map() noexcept(noexcept(Container())) : Container{} {}
  function ordered_map (line 18963) | explicit ordered_map(const Allocator& alloc) noexcept(noexcept(Container...
  function ordered_map (line 18965) | ordered_map(It first, It last, const Allocator& alloc = Allocator())
  function ordered_map (line 18967) | ordered_map(std::initializer_list<value_type> init, const Allocator& all...
  function emplace (line 18970) | std::pair<iterator, bool> emplace(const key_type& key, T&& t)
  function emplace (line 18985) | std::pair<iterator, bool> emplace(KeyType && key, T && t)
  function T (line 18998) | T& operator[](const key_type& key)
  function T (line 19005) | T & operator[](KeyType && key)
  function T (line 19010) | const T& operator[](const key_type& key) const
  function T (line 19017) | const T & operator[](KeyType && key) const
  function T (line 19022) | T& at(const key_type& key)
  function T (line 19037) | T & at(KeyType && key) // NOLINT(cppcoreguidelines-missing-std-forward)
  function T (line 19050) | const T& at(const key_type& key) const
  function T (line 19065) | const T & at(KeyType && key) const // NOLINT(cppcoreguidelines-missing-s...
  function size_type (line 19078) | size_type erase(const key_type& key)
  function size_type (line 19099) | size_type erase(KeyType && key) // NOLINT(cppcoreguidelines-missing-std-...
  function iterator (line 19118) | iterator erase(iterator pos)
  function iterator (line 19123) | iterator erase(iterator first, iterator last)
  function size_type (line 19176) | size_type count(const key_type& key) const
  function size_type (line 19190) | size_type count(KeyType && key) const // NOLINT(cppcoreguidelines-missin...
  function iterator (line 19202) | iterator find(const key_type& key)
  function iterator (line 19216) | iterator find(KeyType && key) // NOLINT(cppcoreguidelines-missing-std-fo...
  function const_iterator (line 19228) | const_iterator find(const key_type& key) const
  function insert (line 19240) | std::pair<iterator, bool> insert( value_type&& value )
  function insert (line 19245) | std::pair<iterator, bool> insert( const value_type& value )
  function insert (line 19263) | void insert(InputIt first, InputIt last)
  function NLOHMANN_JSON_NAMESPACE_BEGIN (line 19290) | NLOHMANN_JSON_NAMESPACE_BEGIN
  function set_parents (line 19917) | void set_parents()
  function iterator (line 19954) | iterator set_parents(iterator it, typename iterator::difference_type cou...
  function reference (line 19967) | reference set_parent(reference j, std::size_t old_capacity = static_cast...
  function basic_json (line 20029) | basic_json(const value_t v)
  function basic_json (line 20037) | basic_json(std::nullptr_t = nullptr) noexcept // NOLINT(bugprone-excepti...
  function basic_json (line 20049) | basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-...
  function basic_json (line 20063) | basic_json(const BasicJsonType& val)
  function basic_json (line 20116) | basic_json(initializer_list_t init,
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20174) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20185) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20196) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20207) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20218) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20226) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function basic_json (line 20234) | basic_json(size_type cnt, const basic_json& val):
  function basic_json (line 20246) | basic_json(InputIT first, InputIT last)
  function basic_json (line 20355) | basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {}
  function basic_json (line 20359) | basic_json(const basic_json& other)
  function basic_json (line 20428) | basic_json(basic_json&& other) noexcept
  function basic_json (line 20445) | basic_json& operator=(basic_json other) noexcept (
  function value_t (line 20508) | constexpr value_t type() const noexcept
  function is_primitive (line 20515) | constexpr bool is_primitive() const noexcept
  function is_structured (line 20522) | constexpr bool is_structured() const noexcept
  function is_null (line 20529) | constexpr bool is_null() const noexcept
  function is_boolean (line 20536) | constexpr bool is_boolean() const noexcept
  function is_number (line 20543) | constexpr bool is_number() const noexcept
  function is_number_integer (line 20550) | constexpr bool is_number_integer() const noexcept
  function is_number_unsigned (line 20557) | constexpr bool is_number_unsigned() const noexcept
  function is_number_float (line 20564) | constexpr bool is_number_float() const noexcept
  function is_object (line 20571) | constexpr bool is_object() const noexcept
  function is_array (line 20578) | constexpr bool is_array() const noexcept
  function is_string (line 20585) | constexpr bool is_string() const noexcept
  function is_binary (line 20592) | constexpr bool is_binary() const noexcept
  function is_discarded (line 20599) | constexpr bool is_discarded() const noexcept
  function object_t (line 20630) | object_t* get_impl_ptr(object_t* /*unused*/) noexcept
  function object_t (line 20636) | constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const...
  function array_t (line 20642) | array_t* get_impl_ptr(array_t* /*unused*/) noexcept
  function array_t (line 20648) | constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const n...
  function string_t (line 20654) | string_t* get_impl_ptr(string_t* /*unused*/) noexcept
  function string_t (line 20660) | constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const...
  function boolean_t (line 20666) | boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept
  function boolean_t (line 20672) | constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) con...
  function number_integer_t (line 20678) | number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept
  function number_integer_t (line 20684) | constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /...
  function number_unsigned_t (line 20690) | number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept
  function number_unsigned_t (line 20696) | constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t*...
  function number_float_t (line 20702) | number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept
  function number_float_t (line 20708) | constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unu...
  function binary_t (line 20714) | binary_t* get_impl_ptr(binary_t* /*unused*/) noexcept
  function binary_t (line 20720) | constexpr const binary_t* get_impl_ptr(const binary_t* /*unused*/) const...
  function ReferenceType (line 20737) | static ReferenceType get_ref_impl(ThisType& obj)
  function get_ptr (line 20770) | constexpr auto get_ptr() const noexcept -> decltype(std::declval<const b...
  function ValueType (line 20862) | ValueType get_impl(detail::priority_tag<1> /*unused*/) const noexcept(no...
  function BasicJsonType (line 20887) | BasicJsonType get_impl(detail::priority_tag<2> /*unused*/) const
  function basic_json (line 20910) | basic_json get_impl(detail::priority_tag<3> /*unused*/) const
  function get_impl (line 20923) | constexpr auto get_impl(detail::priority_tag<4> /*unused*/) const noexcept
  function get (line 20999) | auto get() noexcept -> decltype(std::declval<basic_json_t&>().template g...
  function ValueType (line 21012) | ValueType & get_to(ValueType& v) const noexcept(noexcept(
  function ValueType (line 21025) | ValueType & get_to(ValueType& v) const
  function Array (line 21036) | Array get_to(T (&v)[N]) const // NOLINT(cppcoreguidelines-avoid-c-arrays...
  function ReferenceType (line 21048) | ReferenceType get_ref()
  function ReferenceType (line 21059) | ReferenceType get_ref() const
  function binary_t (line 21118) | binary_t& get_binary()
  function binary_t (line 21130) | const binary_t& get_binary() const
  function reference (line 21152) | reference at(size_type idx)
  function const_reference (line 21175) | const_reference at(size_type idx) const
  function reference (line 21198) | reference at(const typename object_t::key_type& key)
  function reference (line 21218) | reference at(KeyType && key)
  function const_reference (line 21236) | const_reference at(const typename object_t::key_type& key) const
  function const_reference (line 21256) | const_reference at(KeyType && key) const
  function reference (line 21274) | reference operator[](size_type idx)
  function const_reference (line 21320) | const_reference operator[](size_type idx) const
  function reference (line 21333) | reference operator[](typename object_t::key_type key)
  function const_reference (line 21355) | const_reference operator[](const typename object_t::key_type& key) const
  function reference (line 21371) | reference operator[](T* key)
  function const_reference (line 21377) | const_reference operator[](T* key) const
  function reference (line 21386) | reference operator[](KeyType && key)
  function const_reference (line 21410) | const_reference operator[](KeyType && key) const
  class ValueType (line 21436) | class ValueType
  function ReturnType (line 21465) | ReturnType value(const typename object_t::key_type& key, ValueType && de...
  function ValueType (line 21491) | ValueType value(KeyType && key, const ValueType& default_value) const
  function ReturnType (line 21518) | ReturnType value(KeyType && key, ValueType && default_value) const
  function ValueType (line 21541) | ValueType value(const json_pointer& ptr, const ValueType& default_value)...
  function ReturnType (line 21566) | ReturnType value(const json_pointer& ptr, ValueType && default_value) const
  function ValueType (line 21590) | ValueType value(const ::nlohmann::json_pointer<BasicJsonType>& ptr, cons...
  function ReturnType (line 21601) | ReturnType value(const ::nlohmann::json_pointer<BasicJsonType>& ptr, Val...
  function reference (line 21608) | reference front()
  function const_reference (line 21615) | const_reference front() const
  function reference (line 21622) | reference back()
  function const_reference (line 21631) | const_reference back() const
  function IteratorType (line 21643) | IteratorType erase(IteratorType pos)
  function IteratorType (line 21713) | IteratorType erase(IteratorType first, IteratorType last)
  function erase_internal (line 21781) | private:
  function size_type (line 21797) | size_type erase_internal(KeyType && key)
  function size_type (line 21829) | size_type erase(KeyType && key)
  function erase (line 21836) | void erase(const size_type idx)
  function iterator (line 21865) | iterator find(const typename object_t::key_type& key)
  function const_iterator (line 21879) | const_iterator find(const typename object_t::key_type& key) const
  function iterator (line 21895) | iterator find(KeyType && key)
  function const_iterator (line 21911) | const_iterator find(KeyType && key) const
  function size_type (line 21925) | size_type count(const typename object_t::key_type& key) const
  function size_type (line 21935) | size_type count(KeyType && key) const
  function contains (line 21943) | bool contains(const typename object_t::key_type& key) const
  function contains (line 21952) | bool contains(KeyType && key) const
  function contains (line 21959) | bool contains(const json_pointer& ptr) const
  function contains (line 21966) | bool contains(const typename ::nlohmann::json_pointer<BasicJsonType>& pt...
  function iterator (line 21982) | iterator begin() noexcept
  function const_iterator (line 21991) | const_iterator begin() const noexcept
  function const_iterator (line 21998) | const_iterator cbegin() const noexcept
  function iterator (line 22007) | iterator end() noexcept
  function const_iterator (line 22016) | const_iterator end() const noexcept
  function const_iterator (line 22023) | const_iterator cend() const noexcept
  function reverse_iterator (line 22032) | reverse_iterator rbegin() noexcept
  function const_reverse_iterator (line 22039) | const_reverse_iterator rbegin() const noexcept
  function reverse_iterator (line 22046) | reverse_iterator rend() noexcept
  function const_reverse_iterator (line 22053) | const_reverse_iterator rend() const noexcept
  function const_reverse_iterator (line 22060) | const_reverse_iterator crbegin() const noexcept
  function const_reverse_iterator (line 22067) | const_reverse_iterator crend() const noexcept
  function iterator_wrapper (line 22079) | static iteration_proxy<iterator> iterator_wrapper(reference ref) noexcept
  function iterator_wrapper (line 22090) | static iteration_proxy<const_iterator> iterator_wrapper(const_reference ...
  function items (line 22097) | iteration_proxy<iterator> items() noexcept
  function items (line 22104) | iteration_proxy<const_iterator> items() const noexcept
  function empty (line 22120) | bool empty() const noexcept
  function size_type (line 22159) | size_type size() const noexcept
  function size_type (line 22198) | size_type max_size() const noexcept
  function clear (line 22241) | void clear() noexcept
  function push_back (line 22302) | void push_back(basic_json&& val)
  function reference (line 22327) | reference operator+=(basic_json&& val)
  function push_back (line 22335) | void push_back(const basic_json& val)
  function reference (line 22359) | reference operator+=(const basic_json& val)
  function push_back (line 22367) | void push_back(const typename object_t::value_type& val)
  function reference (line 22390) | reference operator+=(const typename object_t::value_type& val)
  function push_back (line 22398) | void push_back(initializer_list_t init)
  function reference (line 22414) | reference operator+=(initializer_list_t init)
  function reference (line 22423) | reference emplace_back(Args&& ... args)
  function emplace (line 22448) | std::pair<iterator, bool> emplace(Args&& ... args)
  function iterator (line 22480) | iterator insert_iterator(const_iterator pos, Args&& ... args)
  function iterator (line 22499) | iterator insert(const_iterator pos, const basic_json& val)
  function iterator (line 22519) | iterator insert(const_iterator pos, basic_json&& val)
  function iterator (line 22526) | iterator insert(const_iterator pos, size_type cnt, const basic_json& val)
  function iterator (line 22546) | iterator insert(const_iterator pos, const_iterator first, const_iterator...
  function iterator (line 22577) | iterator insert(const_iterator pos, initializer_list_t ilist)
  function insert (line 22597) | void insert(const_iterator first, const_iterator last)
  function update (line 22622) | void update(const_reference j, bool merge_objects = false)
  function update (line 22629) | void update(const_iterator first, const_iterator last, bool merge_object...
  function swap (line 22676) | void swap(reference other) noexcept (
  function friend (line 22693) | friend void swap(reference left, reference right) noexcept (
  function swap (line 22705) | void swap(array_t& other) // NOLINT(bugprone-exception-escape,cppcoregui...
  function swap (line 22721) | void swap(object_t& other) // NOLINT(bugprone-exception-escape,cppcoregu...
  function swap (line 22737) | void swap(string_t& other) // NOLINT(bugprone-exception-escape,cppcoregu...
  function swap (line 22753) | void swap(binary_t& other) // NOLINT(bugprone-exception-escape,cppcoregu...
  function swap (line 22769) | void swap(typename binary_t::container_type& other) // NOLINT(bugprone-e...
  function else (line 22858) | else if(compares_unordered(lhs, rhs))\
  function compares_unordered (line 22887) | bool compares_unordered(const_reference rhs, bool inverse = false) const...
  function friend (line 23000) | friend bool operator==(const_reference lhs, const_reference rhs) noexcept
  function friend (line 23032) | friend bool operator!=(const_reference lhs, const_reference rhs) noexcept
  function friend (line 23089) | friend bool operator<=(const_reference lhs, const_reference rhs) noexcept
  function friend (line 23118) | friend bool operator>(const_reference lhs, const_reference rhs) noexcept
  function friend (line 23148) | friend bool operator>=(const_reference lhs, const_reference rhs) noexcept
  function friend (line 23189) | friend std::ostream& operator<<(std::ostream& o, const basic_json& j)
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23228) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23242) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function basic_json (line 23256) | static basic_json parse(detail::span_input_adapter&& i,
  function accept (line 23269) | static bool accept(InputType&& i,
  function accept (line 23278) | static bool accept(IteratorType first, IteratorType last,
  function accept (line 23286) | static bool accept(detail::span_input_adapter&& i,
  function sax_parse (line 23296) | static bool sax_parse(InputType&& i, SAX* sax,
  function sax_parse (line 23311) | static bool sax_parse(IteratorType first, IteratorType last, SAX* sax,
  function sax_parse (line 23330) | static bool sax_parse(detail::span_input_adapter&& i, SAX* sax,
  function JSON_HEDLEY_RETURNS_NON_NULL (line 23371) | JSON_HEDLEY_RETURNS_NON_NULL
  type data (line 23403) | struct data
    method data (line 23411) | data(const value_t v)
    method data (line 23416) | data(size_type cnt, const basic_json& val)
    method data (line 23422) | data() noexcept = default;
    method data (line 23423) | data(data&&) noexcept = default;
    method data (line 23424) | data(const data&) noexcept = delete;
    method data (line 23425) | data& operator=(data&&) noexcept = delete;
    method data (line 23426) | data& operator=(const data&) noexcept = delete;
  function to_cbor (line 23460) | static void to_cbor(const basic_json& j, detail::output_adapter<std::uin...
  function to_cbor (line 23467) | static void to_cbor(const basic_json& j, detail::output_adapter<char> o)
  function to_msgpack (line 23474) | static std::vector<std::uint8_t> to_msgpack(const basic_json& j)
  function to_msgpack (line 23483) | static void to_msgpack(const basic_json& j, detail::output_adapter<std::...
  function to_msgpack (line 23490) | static void to_msgpack(const basic_json& j, detail::output_adapter<char> o)
  function to_ubjson (line 23497) | static std::vector<std::uint8_t> to_ubjson(const basic_json& j,
  function to_ubjson (line 23508) | static void to_ubjson(const basic_json& j, detail::output_adapter<std::u...
  function to_ubjson (line 23516) | static void to_ubjson(const basic_json& j, detail::output_adapter<char> o,
  function to_bjdata (line 23524) | static std::vector<std::uint8_t> to_bjdata(const basic_json& j,
  function to_bjdata (line 23535) | static void to_bjdata(const basic_json& j, detail::output_adapter<std::u...
  function to_bjdata (line 23543) | static void to_bjdata(const basic_json& j, detail::output_adapter<char> o,
  function to_bson (line 23551) | static std::vector<std::uint8_t> to_bson(const basic_json& j)
  function to_bson (line 23560) | static void to_bson(const basic_json& j, detail::output_adapter<std::uin...
  function to_bson (line 23567) | static void to_bson(const basic_json& j, detail::output_adapter<char> o)
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23575) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23591) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function basic_json (line 23607) | static basic_json from_cbor(const T* ptr, std::size_t len,
  function basic_json (line 23617) | static basic_json from_cbor(detail::span_input_adapter&& i,
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23633) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23648) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function basic_json (line 23663) | static basic_json from_msgpack(const T* ptr, std::size_t len,
  function basic_json (line 23672) | static basic_json from_msgpack(detail::span_input_adapter&& i,
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23687) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23702) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function basic_json (line 23717) | static basic_json from_ubjson(const T* ptr, std::size_t len,
  function basic_json (line 23726) | static basic_json from_ubjson(detail::span_input_adapter&& i,
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23741) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23756) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23771) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 23786) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function basic_json (line 23801) | static basic_json from_bson(const T* ptr, std::size_t len,
  function basic_json (line 23810) | static basic_json from_bson(detail::span_input_adapter&& i,
  function reference (line 23832) | reference operator[](const json_pointer& ptr)
  function reference (line 23839) | reference operator[](const ::nlohmann::json_pointer<BasicJsonType>& ptr)
  function const_reference (line 23846) | const_reference operator[](const json_pointer& ptr) const
  function const_reference (line 23853) | const_reference operator[](const ::nlohmann::json_pointer<BasicJsonType>...
  function reference (line 23860) | reference at(const json_pointer& ptr)
  function reference (line 23867) | reference at(const ::nlohmann::json_pointer<BasicJsonType>& ptr)
  function const_reference (line 23874) | const_reference at(const json_pointer& ptr) const
  function const_reference (line 23881) | const_reference at(const ::nlohmann::json_pointer<BasicJsonType>& ptr) c...
  function basic_json (line 23888) | basic_json flatten() const
  function basic_json (line 23897) | basic_json unflatten() const
  function patch_inplace (line 23913) | void patch_inplace(const basic_json& json_patch)
  function basic_json (line 24184) | basic_json patch(const basic_json& json_patch) const
  function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24193) | JSON_HEDLEY_WARN_UNUSED_RESULT
  function merge_patch (line 24336) | void merge_patch(const basic_json& apply_patch)
  function NLOHMANN_BASIC_JSON_TPL_DECLARATION (line 24367) | NLOHMANN_BASIC_JSON_TPL_DECLARATION
  function NLOHMANN_JSON_NAMESPACE_END (line 24404) | NLOHMANN_JSON_NAMESPACE_END

FILE: examples/main/main.cpp
  function main (line 10) | int main(int argc, char **argv) {

FILE: lstm.h
  type encodec_lstm (line 8) | struct encodec_lstm {
  type ggml_tensor (line 22) | struct ggml_tensor
  type ggml_context (line 22) | struct ggml_context
  type ggml_tensor (line 23) | struct ggml_tensor
  type ggml_tensor (line 24) | struct ggml_tensor
  type ggml_tensor (line 25) | struct ggml_tensor
  type ggml_tensor (line 26) | struct ggml_tensor
  type ggml_tensor (line 27) | struct ggml_tensor
  type ggml_tensor (line 39) | struct ggml_tensor
  type ggml_tensor (line 42) | struct ggml_tensor
  type ggml_tensor (line 46) | struct ggml_tensor
  type ggml_tensor (line 50) | struct ggml_tensor
  type ggml_tensor (line 53) | struct ggml_tensor
  type ggml_tensor (line 55) | struct ggml_tensor
  type ggml_tensor (line 58) | struct ggml_tensor
  type ggml_tensor (line 61) | struct ggml_tensor
  type ggml_tensor (line 63) | struct ggml_tensor
  type ggml_tensor (line 64) | struct ggml_tensor
  type ggml_tensor (line 65) | struct ggml_tensor
  type ggml_tensor (line 66) | struct ggml_tensor

FILE: ops.cpp
  function get_extra_padding_for_conv_1d (line 10) | static int get_extra_padding_for_conv_1d(struct ggml_tensor *inp, float ...
  type ggml_tensor (line 18) | struct ggml_tensor
  type ggml_context (line 18) | struct ggml_context
  type ggml_tensor (line 18) | struct ggml_tensor
  type ggml_tensor (line 30) | struct ggml_tensor
  type ggml_tensor (line 34) | struct ggml_tensor
  type ggml_tensor (line 37) | struct ggml_tensor
  type ggml_tensor (line 42) | struct ggml_tensor
  type ggml_context (line 42) | struct ggml_context
  type ggml_tensor (line 42) | struct ggml_tensor
  type ggml_tensor (line 54) | struct ggml_tensor
  type ggml_tensor (line 59) | struct ggml_tensor
  type ggml_context (line 59) | struct ggml_context
  type ggml_tensor (line 59) | struct ggml_tensor
  type ggml_tensor (line 60) | struct ggml_tensor
  type ggml_tensor (line 60) | struct ggml_tensor
  type ggml_tensor (line 66) | struct ggml_tensor
  type ggml_tensor (line 67) | struct ggml_tensor
  type ggml_tensor (line 77) | struct ggml_tensor
  type ggml_context (line 77) | struct ggml_context
  type ggml_tensor (line 77) | struct ggml_tensor
  type ggml_tensor (line 78) | struct ggml_tensor
  type ggml_tensor (line 78) | struct ggml_tensor
  type ggml_tensor (line 80) | struct ggml_tensor
  type ggml_tensor (line 94) | struct ggml_tensor

FILE: ops.h
  type ggml_tensor (line 5) | struct ggml_tensor
  type ggml_context (line 5) | struct ggml_context
  type ggml_tensor (line 5) | struct ggml_tensor
  type ggml_tensor (line 8) | struct ggml_tensor
  type ggml_context (line 8) | struct ggml_context
  type ggml_tensor (line 8) | struct ggml_tensor
  type ggml_tensor (line 11) | struct ggml_tensor
  type ggml_context (line 11) | struct ggml_context
  type ggml_tensor (line 11) | struct ggml_tensor
  type ggml_tensor (line 12) | struct ggml_tensor
  type ggml_tensor (line 12) | struct ggml_tensor
  type ggml_tensor (line 15) | struct ggml_tensor
  type ggml_context (line 15) | struct ggml_context
  type ggml_tensor (line 15) | struct ggml_tensor
  type ggml_tensor (line 16) | struct ggml_tensor
  type ggml_tensor (line 16) | struct ggml_tensor

FILE: quantizer.h
  type encodec_quant_block (line 12) | struct encodec_quant_block {
  type encodec_quantizer (line 16) | struct encodec_quantizer {
  type ggml_tensor (line 20) | struct ggml_tensor
  type encodec_quantizer (line 21) | struct encodec_quantizer
  type ggml_context (line 21) | struct ggml_context
  type ggml_tensor (line 22) | struct ggml_tensor
  type ggml_tensor (line 35) | struct ggml_tensor
  type ggml_tensor (line 38) | struct ggml_tensor
  type ggml_tensor (line 39) | struct ggml_tensor
  type ggml_tensor (line 40) | struct ggml_tensor
  type ggml_tensor (line 47) | struct ggml_tensor
  type ggml_tensor (line 51) | struct ggml_tensor
  type ggml_tensor (line 52) | struct ggml_tensor
  type ggml_tensor (line 55) | struct ggml_tensor
  type ggml_tensor (line 56) | struct ggml_tensor
  type ggml_tensor (line 59) | struct ggml_tensor
  type ggml_tensor (line 68) | struct ggml_tensor
  type ggml_tensor (line 78) | struct ggml_tensor
  type encodec_quantizer (line 79) | struct encodec_quantizer
  type ggml_context (line 79) | struct ggml_context
  type ggml_tensor (line 80) | struct ggml_tensor
  type ggml_tensor (line 95) | struct ggml_tensor
  type ggml_tensor (line 102) | struct ggml_tensor
  type ggml_tensor (line 103) | struct ggml_tensor

FILE: utils.h
  function get_num_codebooks (line 15) | int32_t get_num_codebooks(float bandwidth, int hop_length, float sample_...
  function get_bandwidth_per_quantizer (line 22) | int32_t get_bandwidth_per_quantizer(int bins, float frame_rate) {
  function get_num_quantizers_for_bandwidth (line 26) | int32_t get_num_quantizers_for_bandwidth(int bins, float frame_rate, flo...
Condensed preview — 27 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,293K chars).
[
  {
    "path": ".github/workflows/build.yml",
    "chars": 2128,
    "preview": "name: build\n\non:\n  push:\n    branches:\n      - main\n    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefil"
  },
  {
    "path": ".gitignore",
    "chars": 58,
    "preview": "main.dSYM\n*.bin\n\nencodec\n*.o\n*.th\n.vscode/\n\nbuild/\n\n*.wav\n"
  },
  {
    "path": ".gitmodules",
    "chars": 77,
    "preview": "[submodule \"ggml\"]\n\tpath = ggml\n\turl = https://github.com/ggerganov/ggml.git\n"
  },
  {
    "path": "CMakeLists.txt",
    "chars": 1330,
    "preview": "cmake_minimum_required(VERSION 3.12)\nproject(\"encodec\" C CXX)\n\nif (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)\n    "
  },
  {
    "path": "README.md",
    "chars": 1928,
    "preview": "# encodec.cpp\n\n![encodec.cpp](./assets/banner.png)\n\n[![Actions Status](https://github.com/PABannier/encodec.cpp/actions/"
  },
  {
    "path": "convert.py",
    "chars": 5432,
    "preview": "\"\"\"Convert Encodec checkpoint into the GGML format.\n\nThe bytes are packed in a binary file in the following order:\n    -"
  },
  {
    "path": "decoder.h",
    "chars": 2992,
    "preview": "#pragma once\n\n#include <vector>\n\n#include \"ggml.h\"\n#include \"ggml-alloc.h\"\n#include \"ggml-backend.h\"\n\n#include \"lstm.h\"\n"
  },
  {
    "path": "encodec.cpp",
    "chars": 39783,
    "preview": "#include \"ggml-alloc.h\"\n#include \"ggml-backend.h\"\n#include \"ggml.h\"\n#include \"ggml/src/ggml-impl.h\"\n\n#ifdef GGML_USE_CUB"
  },
  {
    "path": "encodec.h",
    "chars": 6790,
    "preview": "/*\n╞══════════════════════════════════════════════════════════════════════════════╡\n│ Copyright 2024 Pierre-Antoine Bann"
  },
  {
    "path": "encoder.h",
    "chars": 2930,
    "preview": "#pragma once\n\n#include <vector>\n\n#include \"ggml.h\"\n#include \"lstm.h\"\n\n// res + downsample block at some ratio\nstruct enc"
  },
  {
    "path": "examples/CMakeLists.txt",
    "chars": 239,
    "preview": "add_library(common STATIC common.cpp)\ntarget_include_directories(common PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})\ntarget_compi"
  },
  {
    "path": "examples/README.md",
    "chars": 970,
    "preview": "# Examples\n\n## main (Encoder/decoder end-to-end)\n\nThis example shows an end-to-end pipeline to generate codes from a raw"
  },
  {
    "path": "examples/common.cpp",
    "chars": 9917,
    "preview": "#include <cstdint>\n#include <cmath>\n#include <fstream>\n#include <iostream>\n#include <stdexcept>\n#include <string>\n#inclu"
  },
  {
    "path": "examples/common.h",
    "chars": 2630,
    "preview": "#include <string>\n#include <vector>\n\n/**\n * @brief Struct containing parameters for the encodec context.\n *\n */\nstruct e"
  },
  {
    "path": "examples/compress/CMakeLists.txt",
    "chars": 165,
    "preview": "set(TARGET compress)\nadd_executable(${TARGET} main.cpp)\ntarget_link_libraries(${TARGET} PRIVATE encodec common)\ntarget_c"
  },
  {
    "path": "examples/compress/main.cpp",
    "chars": 2129,
    "preview": "#include <cstring>\n#include <memory>\n#include <string>\n#include <thread>\n\n#include \"encodec.h\"\n#include \"common.h\"\n\n\nint"
  },
  {
    "path": "examples/decompress/CMakeLists.txt",
    "chars": 167,
    "preview": "set(TARGET decompress)\nadd_executable(${TARGET} main.cpp)\ntarget_link_libraries(${TARGET} PRIVATE encodec common)\ntarget"
  },
  {
    "path": "examples/decompress/main.cpp",
    "chars": 2015,
    "preview": "#include <cstring>\n#include <memory>\n#include <string>\n#include <thread>\n\n#include \"encodec.h\"\n#include \"common.h\"\n\n\nint"
  },
  {
    "path": "examples/dr_wav.h",
    "chars": 241357,
    "preview": "/*\nWAV audio loader and writer. Choice of public domain or MIT-0. See license statements at the end of this file.\ndr_wav"
  },
  {
    "path": "examples/json.hpp",
    "chars": 916935,
    "preview": "//     __ _____ _____ _____\n//  __|  |   __|     |   | |  JSON for Modern C++\n// |  |  |__   |  |  | | | |  version 3.11"
  },
  {
    "path": "examples/main/CMakeLists.txt",
    "chars": 161,
    "preview": "set(TARGET main)\nadd_executable(${TARGET} main.cpp)\ntarget_link_libraries(${TARGET} PRIVATE encodec common)\ntarget_compi"
  },
  {
    "path": "examples/main/main.cpp",
    "chars": 1992,
    "preview": "#include <cstring>\n#include <memory>\n#include <string>\n#include <thread>\n\n#include \"encodec.h\"\n#include \"common.h\"\n\n\nint"
  },
  {
    "path": "lstm.h",
    "chars": 2923,
    "preview": "#pragma once\n\n#include \"ggml.h\"\n#include \"ggml-alloc.h\"\n\n#include \"ops.h\"\n\nstruct encodec_lstm {\n    struct ggml_tensor "
  },
  {
    "path": "ops.cpp",
    "chars": 3472,
    "preview": "#include <algorithm>\n#include <cassert>\n#include <cmath>\n#include <stdexcept>\n\n#include \"ggml.h\"\n\n#include \"ops.h\"\n\nstat"
  },
  {
    "path": "ops.h",
    "chars": 816,
    "preview": "#pragma once\n\n#include \"ggml.h\"\n\nstruct ggml_tensor *pad_1d(struct ggml_context *ctx0, struct ggml_tensor *inp,\n        "
  },
  {
    "path": "quantizer.h",
    "chars": 3593,
    "preview": "#pragma once\n\n#include <cassert>\n#include <vector>\n\n#include \"ggml.h\"\n#include \"ggml-alloc.h\"\n#include \"ggml-backend.h\"\n"
  },
  {
    "path": "utils.h",
    "chars": 995,
    "preview": "#pragma once\n\n#include <cstddef>\n\n#define MAX(a, b) ((a) > (b) ? (a) : (b))\n#define MIN(a, b) ((a) < (b) ? (a) : (b))\n\nc"
  }
]

About this extraction

This page contains the full source code of the PABannier/encodec.cpp GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 27 files (1.2 MB), approximately 323.1k tokens, and a symbol index with 818 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!