main 97f11d12d5da cached
195 files
2.3 MB
618.6k tokens
2504 symbols
1 requests
Download .txt
Showing preview only (2,473K chars total). Download the full file or copy to clipboard to get everything.
Repository: timescale/timescaledb-toolkit
Branch: main
Commit: 97f11d12d5da
Files: 195
Total size: 2.3 MB

Directory structure:
gitextract_o1olp053/

├── .cargo/
│   └── config
├── .dockerignore
├── .git-blame-ignore-revs
├── .github/
│   ├── ISSUE_TEMPLATE/
│   │   ├── bug-report.md
│   │   ├── feature-request.md
│   │   ├── feature-stabilization.md
│   │   └── proposed-feature.md
│   └── workflows/
│       ├── add-to-bugs-board.yml
│       ├── ci.yml
│       ├── ci_image_build.yml
│       ├── clippy_rustfmt.yml
│       ├── dependency-updates.yml
│       ├── packaging.yml
│       ├── release.yml
│       └── report_packaging_failures.yml
├── .gitignore
├── Cargo.toml
├── Changelog.md
├── LICENSE
├── NOTICE
├── Readme.md
├── crates/
│   ├── aggregate_builder/
│   │   ├── Cargo.toml
│   │   ├── Readme.md
│   │   └── src/
│   │       └── lib.rs
│   ├── asap/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── fft.rs
│   │       └── lib.rs
│   ├── count-min-sketch/
│   │   ├── Cargo.toml
│   │   ├── src/
│   │   │   └── lib.rs
│   │   └── tests/
│   │       └── lib.rs
│   ├── counter-agg/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── lib.rs
│   │       ├── range.rs
│   │       └── tests.rs
│   ├── encodings/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── flat_serialize/
│   │   ├── Readme.md
│   │   ├── example_generated.rs
│   │   ├── flat_serialize/
│   │   │   ├── Cargo.toml
│   │   │   └── src/
│   │   │       └── lib.rs
│   │   └── flat_serialize_macro/
│   │       ├── Cargo.toml
│   │       └── src/
│   │           ├── lib.rs
│   │           └── parser.rs
│   ├── hyperloglogplusplus/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── dense.rs
│   │       ├── hyperloglog_data.rs
│   │       ├── lib.rs
│   │       ├── registers.rs
│   │       ├── sparse/
│   │       │   └── varint.rs
│   │       └── sparse.rs
│   ├── scripting-utilities/
│   │   ├── Readme.md
│   │   ├── control_file_reader/
│   │   │   ├── Cargo.toml
│   │   │   └── src/
│   │   │       └── lib.rs
│   │   └── postgres_connection_configuration/
│   │       ├── Cargo.toml
│   │       └── src/
│   │           └── lib.rs
│   ├── stats-agg/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── lib.rs
│   │       ├── stats1d.rs
│   │       ├── stats2d/
│   │       │   └── stats2d_flat_serialize.rs
│   │       └── stats2d.rs
│   ├── t-digest/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── t-digest-lib/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── time-weighted-average/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── tspoint/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   └── udd-sketch/
│       ├── Cargo.toml
│       └── src/
│           └── lib.rs
├── docker/
│   ├── README.md
│   └── ci/
│       ├── Dockerfile
│       └── setup.sh
├── docs/
│   ├── README.md
│   ├── asap.md
│   ├── client.md
│   ├── counter_agg.md
│   ├── examples/
│   │   ├── tdigest.c
│   │   └── tdigest.py
│   ├── gauge_agg.md
│   ├── hyperloglog.md
│   ├── lttb.md
│   ├── ordered-aggregates.md
│   ├── percentile_approximation.md
│   ├── release.md
│   ├── rolling_average_api_working.md
│   ├── state_agg.md
│   ├── stats_agg.md
│   ├── tdigest.md
│   ├── template.md
│   ├── test_caggs.md
│   ├── test_candlestick_agg.md
│   ├── time_weighted_average.md
│   ├── timeseries.md
│   ├── timeseries_pipeline_elements.md
│   ├── two-step_aggregation.md
│   └── uddsketch.md
├── extension/
│   ├── .gitignore
│   ├── Cargo.toml
│   ├── src/
│   │   ├── accessors/
│   │   │   └── tests.rs
│   │   ├── accessors.rs
│   │   ├── aggregate_builder_tests.rs
│   │   ├── aggregate_utils.rs
│   │   ├── asap.rs
│   │   ├── bin/
│   │   │   └── pgrx_embed.rs
│   │   ├── candlestick.rs
│   │   ├── counter_agg/
│   │   │   └── accessors.rs
│   │   ├── counter_agg.rs
│   │   ├── countminsketch.rs
│   │   ├── datum_utils.rs
│   │   ├── duration.rs
│   │   ├── frequency.rs
│   │   ├── gauge_agg.rs
│   │   ├── heartbeat_agg/
│   │   │   └── accessors.rs
│   │   ├── heartbeat_agg.rs
│   │   ├── hyperloglog.rs
│   │   ├── lib.rs
│   │   ├── lttb.rs
│   │   ├── nmost/
│   │   │   ├── max_by_float.rs
│   │   │   ├── max_by_int.rs
│   │   │   ├── max_by_time.rs
│   │   │   ├── max_float.rs
│   │   │   ├── max_int.rs
│   │   │   ├── max_time.rs
│   │   │   ├── min_by_float.rs
│   │   │   ├── min_by_int.rs
│   │   │   ├── min_by_time.rs
│   │   │   ├── min_float.rs
│   │   │   ├── min_int.rs
│   │   │   └── min_time.rs
│   │   ├── nmost.rs
│   │   ├── palloc.rs
│   │   ├── pg_any_element.rs
│   │   ├── range.rs
│   │   ├── raw.rs
│   │   ├── saturation.rs
│   │   ├── serialization/
│   │   │   ├── collations.rs
│   │   │   ├── functions.rs
│   │   │   └── types.rs
│   │   ├── serialization.rs
│   │   ├── stabilization_info.rs
│   │   ├── stabilization_tests.rs
│   │   ├── state_aggregate/
│   │   │   ├── accessors.rs
│   │   │   └── rollup.rs
│   │   ├── state_aggregate.rs
│   │   ├── stats_agg.rs
│   │   ├── tdigest.rs
│   │   ├── time_vector/
│   │   │   ├── iter.rs
│   │   │   ├── pipeline/
│   │   │   │   ├── aggregation.rs
│   │   │   │   ├── arithmetic.rs
│   │   │   │   ├── delta.rs
│   │   │   │   ├── expansion.rs
│   │   │   │   ├── fill_to.rs
│   │   │   │   ├── filter.rs
│   │   │   │   ├── lambda/
│   │   │   │   │   ├── executor.rs
│   │   │   │   │   ├── lambda_expr.pest
│   │   │   │   │   └── parser.rs
│   │   │   │   ├── lambda.rs
│   │   │   │   ├── map.rs
│   │   │   │   └── sort.rs
│   │   │   └── pipeline.rs
│   │   ├── time_vector.rs
│   │   ├── time_weighted_average/
│   │   │   └── accessors.rs
│   │   ├── time_weighted_average.rs
│   │   ├── type_builder.rs
│   │   ├── uddsketch.rs
│   │   └── utilities.rs
│   └── timescaledb_toolkit.control
├── tests/
│   └── update/
│       ├── candlestick.md
│       ├── heartbeat.md
│       ├── original_update_tests.md
│       ├── state_agg.md
│       ├── time-vector.md
│       └── time-weighted-average.md
└── tools/
    ├── build
    ├── dependencies.sh
    ├── install-timescaledb
    ├── post-install/
    │   ├── Cargo.toml
    │   └── src/
    │       ├── main.rs
    │       └── update_script.rs
    ├── release
    ├── sql-doctester/
    │   ├── Cargo.toml
    │   ├── Readme.md
    │   └── src/
    │       ├── main.rs
    │       ├── parser.rs
    │       ├── runner.rs
    │       └── startup.sql
    ├── testbin
    └── update-tester/
        ├── Cargo.toml
        ├── Readme.md
        └── src/
            ├── installer.rs
            ├── main.rs
            ├── parser.rs
            ├── testrunner/
            │   └── stabilization.rs
            └── testrunner.rs

================================================
FILE CONTENTS
================================================

================================================
FILE: .cargo/config
================================================
[build]
# Postgres symbols won't be available until runtime
rustflags = ["-C", "link-args=-Wl,-undefined,dynamic_lookup"]


================================================
FILE: .dockerignore
================================================
**/*.iml
**/*.o
**/.DS_Store
.editorconfig
.idea
.vscode
.vsls.json
.git
old-versions
target
target-analyzer


================================================
FILE: .git-blame-ignore-revs
================================================
# Merge and parent commit for cargo fmt changes
b7433344f90b142094e73e84c332385498db9335
8b50127c9e4bad1696a68a800ce1ef019cf6fc3c


================================================
FILE: .github/ISSUE_TEMPLATE/bug-report.md
================================================
---
name: Bug Report
about: Something is not working as expected
title: ''
labels: bug
assignees: ''

---

**Relevant system information:**
 - OS: [e.g. Ubuntu 16.04, Windows 10 x64, etc]
 - PostgreSQL version (output of `SELECT version();`): [e.g. 12.0, 13.2, etc]
 - TimescaleDB Toolkit version (output of `\dx timescaledb_toolkit` in `psql`): [e.g. 1.0.0]
 - Installation method: [e.g., "Timescale Cloud", "docker", "source"]

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you _expected_ to happen.

**Actual behavior**
A clear and concise description of what _actually_ happened.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Additional context**
Add any other context about the problem here.


================================================
FILE: .github/ISSUE_TEMPLATE/feature-request.md
================================================
---
name: Feature Request
about: Suggest an idea for this project
title: ''
labels: feature-request
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you would like to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.


================================================
FILE: .github/ISSUE_TEMPLATE/feature-stabilization.md
================================================
---
name: Feature Stabilization
about: Checklist of tasks to move a feature out of experimental
title: ''
labels: ''
assignees: ''

---

## [\<Feature Name>](<link to root issue for feature>)

**What evidence do we have the feature is being used**

**Why do we feel this feature is ready to be stable**

**Is there any known further work needed on this feature after stabilization**

**Are there any compatibility concerns that may arise during future work on this feature**

### Feature History
- Experimental release version:
- Last version modifying on-disk format:
- Target stabilization version:


### Stabilization checklist:
- [ ] Ensure tests exist for all public API
- [ ] Ensure API documentation exists and is accurate
- [ ] Remove `toolkit_experimental` tags and update test usages
- [ ] Add arrow operators for accessors if applicable
- [ ] Ensure arrow operators have test coverage
- [ ] If present, ensure `combine` and `rollup` are tested
- [ ] Add serialization tests for on disk format
- [ ] Add upgrade tests
- [ ] Add continuous aggregate test
- [ ] Add feature level documentation


================================================
FILE: .github/ISSUE_TEMPLATE/proposed-feature.md
================================================
---
name: Proposed Feature
about: Propose a solution to a problem or wishlist item
title: ''
labels: proposed-feature
assignees: ''

---

## What's the functionality you would like to add ##
A clear and concise description of what you want to happen.

## How would the function be used ##
Give an example of what a workflow using the function would look like

## Why should this feature be added?  ##
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
Is your feature request related to a problem? A wishlist item?

### What scale is this useful at? ###
Is this useful for large data sets? Small ones? Medium sized?

## Drawbacks ##
Are there any issues with this particular solution to the problem?

## Open Questions ##
Are there any questions we'd need to address before releasing this feature?

## Alternatives ##
Are there any alternatives to the solutions chosen in the above text? Are there any other issues competing with this one?


================================================
FILE: .github/workflows/add-to-bugs-board.yml
================================================
name: Add bugs to bugs project

"on":
  issues:
    types: [opened, labeled]
  issue_comment:
    types: [created, edited]

jobs:
  add-to-project:
    name: Add issue to project
    runs-on: ubuntu-latest
    steps:
      - uses: actions/add-to-project@v1.0.2
        with:
          project-url: https://github.com/orgs/timescale/projects/55
          github-token: ${{ secrets.ORG_AUTOMATION_TOKEN }}


  waiting-for-author:
    name: Waiting for Author
    runs-on: ubuntu-latest
    if: github.event_name == 'issues' && github.event.action == 'labeled'
      && github.event.label.name == 'waiting-for-author'
    steps:
      - uses: leonsteinhaeuser/project-beta-automations@v2.2.1
        with:
          gh_token: ${{ secrets.ORG_AUTOMATION_TOKEN }}
          organization: timescale
          project_id: 55
          resource_node_id: ${{ github.event.issue.node_id }}
          status_value: 'Waiting for Author'

  waiting-for-engineering:
    name: Waiting for Engineering
    runs-on: ubuntu-latest
    if: github.event_name == 'issue_comment' && !github.event.issue.pull_request
      && contains(github.event.issue.labels.*.name, 'waiting-for-author')
    steps:
      - name: Check if organization member
        uses: tspascoal/get-user-teams-membership@v3
        id: checkUserMember
        with:
         username: ${{ github.actor }}
         organization: timescale
         team: 'database-eng'
         GITHUB_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN }}
      - name: Remove waiting-for-author label
        if: ${{ steps.checkUserMember.outputs.isTeamMember == 'false' }}
        uses: andymckay/labeler@3a4296e9dcdf9576b0456050db78cfd34853f260
        with:
          remove-labels: 'waiting-for-author, no-activity'
          repo-token: ${{ secrets.ORG_AUTOMATION_TOKEN }}
      - name: Move to waiting for engineering column
        if: ${{ steps.checkUserMember.outputs.isTeamMember == 'false' }}
        uses: leonsteinhaeuser/project-beta-automations@v2.2.1
        with:
          gh_token: ${{ secrets.ORG_AUTOMATION_TOKEN }}
          organization: timescale
          project_id: 55
          resource_node_id: ${{ github.event.issue.node_id }}
          status_value: 'Waiting for Engineering'


================================================
FILE: .github/workflows/ci.yml
================================================
name: CI
on:
  pull_request:
  push:
    branches:
    - main
    - staging
    - trying
  schedule:
    # TimescaleDB integration: 8am UTC, 3am Eastern, midnight Pacific
    - cron: '0 8 * * 1-4'
    # Testing on every platform: 6am UTC, 1am Eastern, 10pm Pacific
    - cron: '0 6 * * 1-4'
  workflow_dispatch:
    inputs:
      container-image:
        description: 'Container image to pull from DockerHub'
        required: false
      tsdb-commit:
        description: 'TimescaleDB commit to use'
        default: ''
        required: false
      tsdb-repo:
        description: 'TimescaleDB repo to use'
        default: 'https://github.com/timescale/timescaledb.git'
        required: false
      all-platforms:
        description: 'Test all platforms'
        type: boolean
        default: false

jobs:
  testpostgres:
    name: Test Postgres
    runs-on: ${{ contains(matrix.container.image, 'arm64') && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}
    container:
      image: ${{ inputs.container-image || 'timescaledev/toolkit-builder-test' }}:${{ matrix.container.image }}
    strategy:
      fail-fast: false
      max-parallel: 12
      matrix:
        pgversion: [15, 16, 17, 18]
        container:
        - os: rockylinux
          version: "9"
          image: rockylinux-9-x86_64
          schedule: true
        - os: debian
          version: "13"
          image: debian-13-arm64
          schedule: true
        - os: debian
          version: "13"
          image: debian-13-amd64
          schedule: true
        - os: debian
          version: "12"
          image: debian-12-arm64
          schedule: ${{ inputs.all-platforms || ( github.event_name == 'schedule' && github.event.schedule == '0 6 * * 1-4' ) }}
        - os: debian
          version: "12"
          image: debian-12-amd64
          schedule: ${{ inputs.all-platforms || ( github.event_name == 'schedule' && github.event.schedule == '0 6 * * 1-4' ) }}
        - os: debian
          version: "11"
          image: debian-11-amd64
          schedule: ${{ inputs.all-platforms || ( github.event_name == 'schedule' && github.event.schedule == '0 6 * * 1-4' ) }}
        - os: ubuntu
          version: "24.04"
          image: ubuntu-24.04-amd64
          schedule: true
        - os: ubuntu
          version: "22.04"
          image: ubuntu-22.04-amd64
          schedule: ${{ inputs.all-platforms || ( github.event_name == 'schedule' && github.event.schedule == '0 6 * * 1-4' ) }}
        exclude:
        - container:
            skip: true
        - container:
            schedule: false
    env:
      # TODO Why?  Cargo default is to pass `-C incremental` to rustc; why don't we want that?
      #   https://doc.rust-lang.org/rustc/codegen-options/index.html#incremental
      #   Well turning it off takes the extension target size down from 3G to 2G...
      CARGO_INCREMENTAL: 0
      # TODO Why?  If we're concerned about trouble fetching crates, why not
      #  just fetch them once at the time we select a dependency?
      #  Errors fetching crates are probably rare enough that we don't see the
      #  need to bother, but then why not just let the build fail?
      CARGO_NET_RETRY: 10
      # TODO What reads this?  It's not listed on
      #  https://doc.rust-lang.org/cargo/reference/environment-variables.html
      CI: 1
      RUST_BACKTRACE: short

    steps:
    - name: Checkout Repository
      uses: actions/checkout@v5
      with:
        ref: ${{ github.event.pull_request.head.sha }}
        token: ${{ secrets.GITHUB_TOKEN }}

    # Github Actions provides a bind mounted working directory for us, where
    # the above checkout happens, and where caches are read from and restored
    # to, and it's all owned by 1001.  Our container image is `USER root` so
    # we have no problem writing anywhere, but we run some things as user
    # 'postgres', which used to be user 1000 but is now 1001.  Hoping in the
    # future to make our container image `USER postgres` and further simplify
    # this file and the packaging Actions file, but it's non-trivial.
    - name: chown Repository
      run: |
        chown -R postgres .

    - name: Build and install TimescaleDB
      if: ${{ (github.event_name == 'schedule' && github.event.schedule == '0 8 * * 1-4') || inputs.tsdb-commit != '' }}
      run: ./tools/install-timescaledb '${{ matrix.pgversion }}' '${{ inputs.tsdb-repo || 'https://github.com/timescale/timescaledb.git' }}' '${{ inputs.tsdb-commit == '' && 'main' || matrix.tsdb_commit || inputs.tsdb-commit }}'

    # TODO After the container image contains a primed target dir, is this still worth it?
    #   Only possible advantage is this one is per-pg-version but what's the impact?
    - name: Cache cargo target dir
      uses: actions/cache@v4
      if: ${{ matrix.container.image == 'debian-11-amd64' }}
      with:
        path: target
        key: ${{ runner.os }}-test-pg${{ matrix.pgversion }}-target-${{ hashFiles('Cargo.lock', '.github/workflows/ci.yml') }}
        restore-keys: ${{ runner.os }}-test-pg${{ matrix.pgversion }}-target-  

    # Packages not 
    #

    - name: Run pgrx tests
      run: |
        if [ "${{ matrix.container.version }}" = 7 ]; then
          # needed for pgrx to find clang
          set +e # will succeed but have non-zero exit code
          . scl_source enable llvm-toolset-7
          set -e
        fi

        su postgres -c 'sh tools/build -pg${{ matrix.pgversion }} test-extension 2>&1'

    - name: Run doc tests
      # depends on TSDB, which requires PG >=13
      if: ${{ matrix.pgversion >= 13 && (matrix.pgversion <= 15 || ((github.event_name == 'schedule' && github.event.schedule == '0 8 * * 1-4') || inputs.tsdb-commit != '')) }}
      run: su postgres -c 'sh tools/build -pg${{ matrix.pgversion }} test-doc 2>&1'

    - name: Run binary update tests (deb)
      # depends on TSDB, which requires PG >=13
      if: ${{ (matrix.container.os == 'debian' || matrix.container.os == 'ubuntu') && (matrix.pgversion >= 13 && (matrix.pgversion <= 16 || ((github.event_name == 'schedule' && github.event.schedule == '0 8 * * 1-4') || inputs.tsdb-commit != ''))) }}
      run: |
        su postgres -c 'OS_NAME=${{ matrix.container.os }} OS_VERSION=${{ matrix.container.version }} tools/testbin -version no -bindir / -pgversions ${{ matrix.pgversion }} ci 2>&1'    
    - name: Run binary update tests (EL)
      if: ${{ (matrix.container.os == 'rockylinux') && matrix.container.version != '9' && (matrix.pgversion >= 13 && (matrix.pgversion <= 16 || ((github.event_name == 'schedule' && github.event.schedule == '0 8 * * 1-4') || inputs.tsdb-commit != ''))) }}
      run: |
        su postgres -c 'OS_NAME=${{ matrix.container.os }} OS_VERSION=${{ matrix.container.version }} tools/testbin -version no -bindir / -pgversions ${{ matrix.pgversion }} rpm_ci 2>&1'

  testcrates:
    name: Test Crates
    runs-on: ubuntu-24.04
    container:
      image: ${{ inputs.container-image || 'timescaledev/toolkit-builder' }}:debian-11-amd64
      env:
        CARGO_INCREMENTAL: 0
        CARGO_NET_RETRY: 10
        CI: 1
        RUST_BACKTRACE: short

    steps:
    - name: Checkout Repository
      uses: actions/checkout@v5
      with:
        ref: ${{ github.event.pull_request.head.sha }}

    - name: chown Repository
      run: chown -R postgres .

    - name: Cache cargo target dir
      uses: actions/cache@v4
      with:
        path: target
        key: ${{ runner.os }}-test-crates-target-${{ hashFiles('Cargo.lock', '.github/workflows/ci.yml') }}
        restore-keys: ${{ runner.os }}-test-crates-target-

    - name: Run Crates Tests
      run: su postgres -c 'sh tools/build test-crates 2>&1'


================================================
FILE: .github/workflows/ci_image_build.yml
================================================
name: Build CI Image

on:
  pull_request:
    paths:
      - 'docker/ci/**'
      - '.github/workflows/ci_image_build.yml'
      - 'tools/dependencies.sh'
  workflow_dispatch:
    inputs:
      tag-base:
        description: 'Push image to DockerHub with this base tag (remove "-test" to enable)'
        required: false
        # Repeating the default here for ease of editing in the github actions form.  Keep in sync with below.
        default: timescaledev/toolkit-builder-test
      toolkit-commit:
        description: 'Toolkit commit (branch, tag, etc.) to build image from'
        required: false
        default: main
      builder-commit:
        description: 'Commit (branch, tag, etc.) on release-build-scripts repository to use'
        required: false

jobs:
  build:
    env:
      GITHUB_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN}}
    runs-on: ubuntu-24.04
    steps:
      - name: Run release-build-scripts job
        # Repeating the default here for 'pull_request'.  Keep in sync with above.
        run: |
          echo "toolkit-commit: ${{ inputs.toolkit-commit || github.event.pull_request.head.sha }}"
          echo "builder: ${{ inputs.builder-commit || 'main' }}"
          echo "tag-base: ${{ inputs.tag-base || 'timescaledev/toolkit-builder-test' }}"
          gh workflow run toolkit-image.yml \
                -R timescale/release-build-scripts \
                -r ${{ inputs.builder-commit || 'main' }} \
                -f tag-base=${{ inputs.tag-base || 'timescaledev/toolkit-builder-test' }} \
                -f toolkit-commit=${{ inputs.toolkit-commit || github.event.pull_request.head.sha }}


================================================
FILE: .github/workflows/clippy_rustfmt.yml
================================================
name: Clippy and rustfmt
on:
  pull_request:
  push:
    branches:
    - main
    - staging
    - trying
  workflow_dispatch:
    inputs:
      container-image:
        description: 'Container image to pull from DockerHub'
        required: false

jobs:
  clippy:
    name: Clippy/rustfmt Test
    runs-on: ubuntu-24.04
    container:
      # Duplicated from ci.yml
      image: ${{ inputs.container-image || 'timescaledev/toolkit-builder-test:debian-11-amd64' }}
      env:
        # TODO: See TODOs on duplicate block in ci.yml
        CARGO_INCREMENTAL: 0
        CARGO_NET_RETRY: 10
        CI: 1
        RUST_BACKTRACE: short

    steps:
    - name: Checkout Repository
      uses: actions/checkout@v5
      with:
        ref: ${{ github.event.pull_request.head.sha }}

    - name: chown Repository
      run: chown -R postgres .

    - name: Cache cargo target dir
      uses: actions/cache@v4
      with:
        path: target
        key: ${{ runner.os }}-clippy-target-${{ hashFiles('Cargo.lock', '.github/workflows/clippy_rustfmt.yml') }}
        restore-keys: ${{ runner.os }}-clippy-target-

    - name: Run Clippy
      # Github captures stdout and stderr separately and then intermingles them
      # in the wrong order.  We don't actually care to distinguish, so redirect
      # stderr to stdout so we get the proper order.
      run: su postgres -c 'sh tools/build clippy 2>&1'

    - name: Verify formatting
      run: su postgres -c 'cargo fmt --check 2>&1'


================================================
FILE: .github/workflows/dependency-updates.yml
================================================
name: Dependency Updates
on:
  schedule:
    # Run on the 1st of every month at 9:00 AM UTC
    - cron: '0 9 1 * *'
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

jobs:
  update-dependencies:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v5
      with:
        token: ${{ secrets.GITHUB_TOKEN }}

    - uses: dtolnay/rust-toolchain@stable
      with:
        toolchain: stable

    - name: Cache cargo registry and index
      uses: actions/cache@v4
      with:
        path: |
          ~/.cargo/registry/index/
          ~/.cargo/registry/cache/
          ~/.cargo/git/db/
        key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
        restore-keys: |
          ${{ runner.os }}-cargo-

    - name: Update dependencies
      run: |
        # Update dependencies and capture the output
        cargo update --verbose > update_output.txt 2>&1 || true
        
        # Check if Cargo.lock was modified
        if git diff --quiet Cargo.lock; then
          echo "NO_UPDATES=true" >> $GITHUB_ENV
          echo "No dependency updates available"
        else
          echo "NO_UPDATES=false" >> $GITHUB_ENV
          echo "Dependencies updated, changes detected in Cargo.lock"
        fi

    - name: Run cargo check
      if: env.NO_UPDATES == 'false'
      run: cargo check --all-targets --verbose

    - name: Run tests
      if: env.NO_UPDATES == 'false'
      run: cargo test --verbose

    - name: Generate update summary
      if: env.NO_UPDATES == 'false'
      run: |
        echo "## 📦 Monthly Dependency Updates" > pr_body.md
        echo "" >> pr_body.md
        echo "This PR contains automated dependency updates for $(date +'%B %Y')." >> pr_body.md
        echo "" >> pr_body.md
        echo "### Changes:" >> pr_body.md
        echo "\`\`\`" >> pr_body.md
        cat update_output.txt >> pr_body.md
        echo "\`\`\`" >> pr_body.md
        echo "" >> pr_body.md
        echo "### Verification:" >> pr_body.md
        echo "- ✅ \`cargo check\` passed" >> pr_body.md
        echo "- ✅ \`cargo test\` passed" >> pr_body.md
        echo "" >> pr_body.md
        echo "---" >> pr_body.md
        echo "*This PR was automatically created by the monthly dependency update workflow.*" >> pr_body.md

    - name: Create Pull Request
      if: env.NO_UPDATES == 'false'
      uses: peter-evans/create-pull-request@v6
      with:
        token: ${{ secrets.GITHUB_TOKEN }}
        commit-message: "chore: update dependencies for $(date +'%B %Y')"
        title: "chore: Monthly dependency updates - $(date +'%B %Y')"
        body-path: pr_body.md
        branch: dependency-updates/$(date +'%Y-%m')
        delete-branch: true
        labels: |
          dependencies
          automated
        assignees: ${{ github.repository_owner }}
        draft: false

    - name: Summary
      run: |
        if [ "$NO_UPDATES" = "true" ]; then
          echo "✅ No dependency updates needed - all dependencies are up to date!"
        else
          echo "✅ Dependency update PR created successfully!"
        fi


================================================
FILE: .github/workflows/packaging.yml
================================================
# Trigger package workflows on release tagging
name: Build packages
on:
  push:
    tags:
    - "[0-9]+.[0-9]+.[0-9]+"
  workflow_dispatch:

jobs:
  package:
    env:
      GITHUB_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN }}
    runs-on: ubuntu-24.04
    steps:
      - name: Set env
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV

      - name: Debian and Ubuntu packages
        if: always()
        run: |
          gh workflow run toolkit-apt.yml -R timescale/release-build-scripts -r main -f version=${{ env.RELEASE_VERSION }} -f upload-artifacts=true

      - name: RPM packages
        if: always()
        run: |
          gh workflow run toolkit-rpm.yml -R timescale/release-build-scripts -r main -f version=${{ env.RELEASE_VERSION }} -f upload-artifacts=true



================================================
FILE: .github/workflows/release.yml
================================================
name: Release
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'New version number for release'
        required: true
      commit:
        description: 'Commit id to branch from (default is HEAD of main)'
        type: string
        required: false
        default: main
      # TODO Make this harder to screw up by making a checkbox.
      dry-run:
        description: '-n for dry-run, -push to really release'
        type: string
        required: false
        default: -n

jobs:
  release:
    name: Release
    runs-on: ubuntu-24.04
    container:
      image: timescaledev/toolkit-builder-test:debian-11-amd64

    steps:
    - name: Checkout Repository
      uses: actions/checkout@v5
      with:
        ref: ${{ inputs.commit }}

    - name: chown Repository
      run: chown -R postgres .

    - name: Install dependencies not yet in image
      run: su postgres -c 'tools/release setup' 2>&1

    - name: Run tools/release
      env:
        GITHUB_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN }}
        ACTIONS_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: su postgres -c 'tools/release ${{ inputs.dry-run }} -version ${{ inputs.version }} ${{ inputs.commit }}' 2>&1


================================================
FILE: .github/workflows/report_packaging_failures.yml
================================================
# Posts a Slack notification whenever one of the listed workflows finishes
# unsuccessfully (skipped/cancelled/failed), except for pull_request runs.
name: Report Build Package Failures
on:
  workflow_run:
    workflows: [Build packages, Build CI Image, CI]
    types: [completed]

jobs:
  on-failure:
    runs-on: ubuntu-24.04
    # `!= 'success'` (rather than `== 'failure'`) also catches cancelled runs;
    # PR-triggered runs are excluded to avoid noise from contributor branches.
    if: ${{ github.event.workflow_run.conclusion != 'success' && github.event.workflow_run.event != 'pull_request' }}
    steps:
      - name: slack-send
        uses: slackapi/slack-github-action@v1.19.0
        with:
          # Block Kit payload: a link to the failed run plus its conclusion and trigger.
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": "Workflow run <${{ github.event.workflow_run.html_url }}|${{ github.event.workflow.name}}#${{ github.event.workflow_run.run_number }}>"
                  }
                },
                {
                  "type": "section",
                  "fields": [
                    {
                      "type": "mrkdwn",
                      "text": "*Status*\n`${{ github.event.workflow_run.conclusion }}`"
                    },
                    {
                      "type": "mrkdwn",
                      "text": "*Triggered By*\n<${{ github.event.sender.html_url }}|${{ github.event.sender.login }}>"
                    }
                  ]
                }
              ]
            }
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_NOTIFY_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK


================================================
FILE: .gitignore
================================================
# macOS Finder metadata
.DS_Store
# Editor/IDE configuration
/.idea
/.vscode
/.vsls.json
# Local working directories and build output
/old-versions
/target
*.iml
# rust-analyzer's separate build directory
/target-analyzer
# Per-developer editor settings
/.editorconfig


================================================
FILE: Cargo.toml
================================================
# Workspace manifest: builds the extension plus its supporting CLI tools.
[workspace]
# Resolver v2: feature unification is computed per-target, not workspace-wide.
resolver = "2"

members = [
    "crates/t-digest-lib",
    "extension",
    "tools/post-install",
    "tools/sql-doctester",
    "tools/update-tester",
]

[profile.release]
# Full cross-crate LTO and a single codegen unit maximize optimization;
# debug info is kept so release binaries remain debuggable/profilable.
[profile.release]
lto = "fat"
debug = true
codegen-units = 1


================================================
FILE: Changelog.md
================================================
# Toolkit Changelog

## Process for updating this changelog

This changelog should be updated as part of a PR if the work is worth noting (most of them should be). If unsure, always add an entry here for any PR targeted for the next release. It's easier to remove than add an entry at final review time for the next release.

## Next Release (Date TBD)

#### New experimental features

#### Bug fixes

#### Other notable changes

#### Shout-outs

**Full Changelog**: [TODO]

## [1.21.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.21.0) (2025-04-17)

#### New experimental features

#### Bug fixes

#### Other notable changes
- [#847](https://github.com/timescale/timescaledb-toolkit/pull/847): Added `total` accessor for tdigest and uddsketch
- [#853](https://github.com/timescale/timescaledb-toolkit/pull/853): Performance improvements for `UDDSketch`

#### Shout-outs

**Full Changelog**: [TODO]

## [1.19.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.19.0) (2024-11-14)

#### New experimental features

#### Bug fixes

#### Other notable changes

#### Shout-outs

**Full Changelog**: [TODO]

## [1.18.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.18.0) (2023-11-28)

#### New experimental features

- [#776](https://github.com/timescale/timescaledb-toolkit/pull/776): PostgreSQL 16 support

**Full Changelog**: [TODO]

## [1.17.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.17.0) (2023-07-06)

#### New experimental features

#### Bug fixes
- [#761](https://github.com/timescale/timescaledb-toolkit/pull/761): Make sure nmost combine uses correct memctx

#### Other notable changes

#### Shout-outs

**Full Changelog**: [TODO]

## [1.16.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.16.0) (2023-04-05)

#### Bug fixes
- [#733](https://github.com/timescale/timescaledb-toolkit/pull/733): Fix a bug when rolling up overlapping heartbeat_aggs
- [#740](https://github.com/timescale/timescaledb-toolkit/pull/740): When interpolating an 'locf' time weighted average, extend last point to interpolation boundary
- [#742](https://github.com/timescale/timescaledb-toolkit/pull/742): Ignore incoming NULL values in hyperloglog rollup

#### Stabilized features
- [#741](https://github.com/timescale/timescaledb-toolkit/pull/741): Stabilize `approx_count_distinct`
- [#748](https://github.com/timescale/timescaledb-toolkit/pull/748): Stabilize `approx_percentile_array`
- [#745](https://github.com/timescale/timescaledb-toolkit/pull/745): Stabilize date utility functions
- [#751](https://github.com/timescale/timescaledb-toolkit/pull/751): Stabilize `min_n`/`max_n`/`min_n_by`/`max_n_by`
- [#752](https://github.com/timescale/timescaledb-toolkit/pull/752): Stabilize `mcv_agg`, this was previously our `topn_agg`

#### Other notable changes
- [#743](https://github.com/timescale/timescaledb-toolkit/pull/743): Remove support for direct upgrades from toolkit versions more than 1 year old. Toolkit versions 1.4.x and 1.5.x will have to upgrade to an intermediate version before upgrading to 1.16.0.
- [#744](https://github.com/timescale/timescaledb-toolkit/pull/744): Fix nightly CI failures from building TimescaleDB on Enterprise Linux
- [#749](https://github.com/timescale/timescaledb-toolkit/pull/749): Added num_live_ranges, num_gaps, and trim_to accessors for heartbeat aggregates

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.15.0...1.16.0

## [1.15.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.15.0) (2023-03-08)

#### New experimental features

#### Bug fixes
- [#715](https://github.com/timescale/timescaledb-toolkit/pull/715): Fix out-of-bounds indexing error in `state_agg` rollup

#### Stabilized features
- [#722](https://github.com/timescale/timescaledb-toolkit/pull/722): Stabilize heartbeat aggregate.
- [#724](https://github.com/timescale/timescaledb-toolkit/pull/724): Stabilize integral and interpolated_integral for time-weighted-average.
- [#723](https://github.com/timescale/timescaledb-toolkit/pull/723): Stabilized `state_agg`

#### Other notable changes
- [#716](https://github.com/timescale/timescaledb-toolkit/issues/716): Add arrow operator support for counter aggregate and time-weighted aggregate interpolated accessors.
- [#716](https://github.com/timescale/timescaledb-toolkit/issues/716): Remove experimental versions of interpolated accessors for counter aggregate and time-weighted aggregates.  The stable versions introduced in 1.14.0 should be used instead.
- [#723](https://github.com/timescale/timescaledb-toolkit/pull/723): Added `state_at` function for `state_agg`
- [#709](https://github.com/timescale/timescaledb-toolkit/pull/709): Updated pgx version to 0.7.1

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.14.0...1.15.0

## [1.14.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.14.0) (2023-02-09)

#### New experimental features

#### Bug fixes
- [#660](https://github.com/timescale/timescaledb-toolkit/issues/660): Heartbeat aggregate rollup should interpolate aggregates
- [#679](https://github.com/timescale/timescaledb-toolkit/issues/679): Heartbeat agg rollup producing invalid aggregates.

#### Stabilized features
- [#701](https://github.com/timescale/timescaledb-toolkit/pull/701): Stabilize candlestick.
- [#650](https://github.com/timescale/timescaledb-toolkit/pull/650): Stabilize interpolated_delta & interpolated_rate for counter aggregate, and interpolated_average for time-weighted aggregate.

#### Other notable changes
- [#685](https://github.com/timescale/timescaledb-toolkit/issues/685): rollup for freq_agg and topn_agg
- [#692](https://github.com/timescale/timescaledb-toolkit/pull/692): Support specifying a range to `duration_in` to specify a time range to get states in for state aggregates
- [#692](https://github.com/timescale/timescaledb-toolkit/pull/692): Removed `next` parameter from interpolated state aggregate functions
- [#692](https://github.com/timescale/timescaledb-toolkit/pull/692): Renamed `state_agg` to `compact_state_agg` and `timeline_agg` to `state_agg`
- [#699](https://github.com/timescale/timescaledb-toolkit/pull/699): `interpolated_duration_in`/`duration_in`/`interpolated_state_periods`/`state_periods` have the first two arguments swapped: now the aggregate is first and the state is second
- [#699](https://github.com/timescale/timescaledb-toolkit/pull/699): `into_values`/`into_int_values` now returns a table with intervals instead of microseconds

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.13.1...1.14.0

## [1.13.1](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.13.1) (2023-01-03)

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.13.0...1.13.1

- [#664](https://github.com/timescale/timescaledb-toolkit/pull/664) Support PostgreSQL 15.

## [1.13.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.13.0) (2022-12-13)

#### New experimental features
- [#615](https://github.com/timescale/timescaledb-toolkit/pull/615): Heartbeat aggregate

  Users can use the new `heartbeat_agg(timestamp, start_time, agg_interval, heartbeat_interval)` to track the liveness of a system in the range (`start_time`, `start_time` + `agg_interval`). Each timestamp seen in that range is assumed to indicate system liveness for the following `heartbeat_interval`.

  Once constructed, users can query heartbeat aggregates for `uptime` and `downtime`, as well as query for `live_ranges` or `dead_ranges`. Users can also check for `live_at(timestamp)`.

  Heartbeat aggregates can also be interpolated to better see behavior around the boundaries of the individual aggregates.

- [#620](https://github.com/timescale/timescaledb-toolkit/pull/620): Expose TDigest type

  This is a prototype for building `TDigest` objects client-side, for `INSERT` into tables.

  This is a lightly tested prototype; try it out at your own risk!

  [Examples](docs/examples/)

- [#635](https://github.com/timescale/timescaledb-toolkit/pull/635): AsOf joins for timevectors

  This allows users to join two timevectors with the following semantics `timevectorA -> asof(timevectorB)`. This will return records with the LOCF value from timevectorA at the timestamps from timevectorB. Specifically the returned records contain, for each value in timevectorB, {the LOCF value from timevectorA, the value from timevectorB, the timestamp from timevectorB}.

- [#609](https://github.com/timescale/timescaledb-toolkit/pull/609): New `approx_percentile_array()` function

  Users can use the new `toolkit_experimental.approx_percentile_array(percentiles)` to generate an array of percentile results instead of having to call and rebuild the aggregate multiple times.

- [#636](https://github.com/timescale/timescaledb-toolkit/pull/636): New `timeline_agg` aggregate, which is similar to `state_agg` but tracks the entire state timeline instead of just the duration in each state.

- [#640](https://github.com/timescale/timescaledb-toolkit/pull/640): Support `rollup` for `state_agg` and `timeline_agg`.
- [#640](https://github.com/timescale/timescaledb-toolkit/pull/640): Support integer states for `state_agg` and `timeline_agg`.

- [#638](https://github.com/timescale/timescaledb-toolkit/pull/638): Introducing Time Vector Templates.

Users can use the new experimental function `toolkit_experimental.to_text(timevector(time, value),format_string)` to render a formatted text representation of their time vector series. These changes also include `toolkit_experimental.to_plotly(timevector(time, value))`, which will render your time vector series in a format suitable for use with plotly.

#### Bug fixes
- [#644](https://github.com/timescale/timescaledb-toolkit/pull/644): Fix bug in Candlestick aggregate and reenable partial aggregation.

#### Other notable changes
- [#646](https://github.com/timescale/timescaledb-toolkit/pull/646): Added experimental support for PostgreSQL 15.
- [#621](https://github.com/timescale/timescaledb-toolkit/pull/621): Rocky Linux 9 support

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.12.1...1.13.0

## [1.12.1](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.12.1) (2022-11-17)

#### Bug fixes
- [#624](https://github.com/timescale/timescaledb-toolkit/pull/624): Remove partial aggregation for Candlestick aggregates.
  We've determined that the cause for the bad results lives somewhere in the functions that are used to support partial aggregation.
  We can at least prevent folks from running the candlestick aggregates in parallel mode and hitting this bug by dropping support for partial aggregation until we've resolved the issue.

## [1.12.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.12.0) (2022-11-08)

#### New experimental features
- [#596](https://github.com/timescale/timescaledb-toolkit/pull/596): Introduce Candlestick Aggregate.
  Users can use either the `toolkit_experimental.candlestick_agg(timestamp, price, volume)` aggregate or the `toolkit_experimental.candlestick(timestamp, open, high, low, close, volume)` function, depending on whether they are starting from tick data or already aggregated data.
  Both the aggregate form and the function form of `Candlestick` support the following (experimental) accessors (in addition to being re-aggregated via `rollup`):
  `open`, `high`, `low`, `close`, `open_time`, `high_time`, `low_time`, `close_time`, `volume`, `vwap` (Volume Weighted Average Price)
  *NOTE*: This functionality improves upon and replaces the need for `toolkit_experimental.ohlc` which will be removed in the next release.

- [#590](https://github.com/timescale/timescaledb-toolkit/pull/590): New `min_n`/`max_n` functions and related `min_n_by`/`max_n_by`.
  The former is used to get the top N values from a column while the latter will also track some additional data, such as another column or even the entire row.
  These should give the same results as a `SELECT ... ORDER BY ... LIMIT n`, except they can be composed and combined like other toolkit aggregates.

#### Bug fixes

- [#568](https://github.com/timescale/timescaledb-toolkit/pull/568): Allow `approx_count` accessor function to take NULL inputs.
- [#574](https://github.com/timescale/timescaledb-toolkit/pull/574): Add default unit to interpolated_integral.

#### Other notable changes

- RPM packages for CentOS 7 have returned.
- New Homebrew formula available for macOS installation: `brew install timescale/tap/timescaledb-toolkit`.
- [#547](https://github.com/timescale/timescaledb-toolkit/pull/547): Update pgx to 0.5.0. This is necessary for adding Postgres 15 support coming soon.
- [#571](https://github.com/timescale/timescaledb-toolkit/pull/571): Update CI docker image for pgx 0.5.0.
- [#599](https://github.com/timescale/timescaledb-toolkit/pull/599): Reduce floating point error when using `stats_agg` in moving aggregate mode.
- [#589](https://github.com/timescale/timescaledb-toolkit/pull/589): Update pgx to 0.5.4.
- [#594](https://github.com/timescale/timescaledb-toolkit/pull/594): Verify that pgx doesn't generate CREATE OR REPLACE FUNCTION.
- [#592](https://github.com/timescale/timescaledb-toolkit/pull/592): Add build script option to install in release mode.

#### Shout-outs

- @zyro for reporting null handling issue on `count_min_sketch`.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.11.0...1.12.0

## [1.11.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.11.0) (2022-09-21)

#### New experimental features

- arm64/aarch64 DEB packages are now available for Ubuntu 20.04 (focal) & 22.04 (jammy), and Debian 10 (buster) & 11 (bullseye).
- [#526](https://github.com/timescale/timescaledb-toolkit/pull/526): Add `integral` and `interpolated_integral` functions for the time_weight aggregate. Makes `trapezoidal` an alias for `linear` in `time_weight` as it might be a more familiar numeric integral method for some.
- [#517](https://github.com/timescale/timescaledb-toolkit/pull/517): Add a gap preserving `lttb` named `gp_lttb` to handle downsampling of data with large gaps.
- [#513](https://github.com/timescale/timescaledb-toolkit/pull/513): Add `first_val`, `last_val`, `first_time` and `last_time` to `time_weight` and `counter_agg` to access the first and the last data points within the aggregate data structures.
- [#527](https://github.com/timescale/timescaledb-toolkit/pull/527): Rename `{open, high, low, close}_at` to `{open, high, low, close}_time` to be consistent with newly added `first_time` and `last_time` accessor functions.

#### Stabilized features

- [#498](https://github.com/timescale/timescaledb-toolkit/pull/498): Stabilize `asap_smooth` aggregate.

#### Bug fixes

- [#509](https://github.com/timescale/timescaledb-toolkit/pull/509), [#531](https://github.com/timescale/timescaledb-toolkit/pull/531): Fix bugs in `hyperloglog`. Error rates are now significantly more consistent when the number of buckets are close to the actual cardinality.
- [#514](https://github.com/timescale/timescaledb-toolkit/pull/514): Fix a bug in `toolkit_experimental.interpolated_delta`.
- [#503](https://github.com/timescale/timescaledb-toolkit/pull/503): Fix bitwise logic in timevector combine.
- [#507](https://github.com/timescale/timescaledb-toolkit/pull/507): Fix a typo in `approx_count_distinct`.

#### Other notable changes

- DEB packages for Ubuntu 18.04 (Bionic) on amd64 are now available.
- [#536](https://github.com/timescale/timescaledb-toolkit/pull/536): Document requirement to use same compiler for cargo-pgx and Toolkit.
- [#535](https://github.com/timescale/timescaledb-toolkit/pull/535): Make tests pass in Canadian locales. 
- [#537](https://github.com/timescale/timescaledb-toolkit/pull/537): Enforce `cargo fmt` in CI.
- [#524](https://github.com/timescale/timescaledb-toolkit/pull/524): Updating Toolkit To Start Using Cargo Fmt.
- [#522](https://github.com/timescale/timescaledb-toolkit/pull/522): Move update-tester tests to markdown files.

#### Shout-outs

- @BenSandeen for fixing typos and errors in the hyperloglog++ implementation.
- @jaskij for reporting security advisories and suggestion on documenting support for PG 14.
- @jeremyhaberman for fixing a typo in `APPROX_COUNT_DISTINCT_DEFAULT_SIZE`.
- @jledentu for reporting an error on `interpolated_delta`.
- @stevedrip for a very detailed bug report on hyperloglog++ and suggestions for fixing it.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.10.1...1.11.0

## [1.10.1](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.10.1) (2022-08-18)

#### New experimental features

- [#490](https://github.com/timescale/timescaledb-toolkit/pull/490): Month normalization function `month_normalize` and the helper function `days_in_month`, useful for normalizing data to a fixed month length for more meaningful month-to-month comparison.
- [#496](https://github.com/timescale/timescaledb-toolkit/pull/496): `OHLC` aggregate, and the associated `rollup` and accessor functions `open`, `high`, `low`, `close`, `{open, high, low, close}_at` mainly for trading data.

#### Stabilized features

- [#495](https://github.com/timescale/timescaledb-toolkit/pull/495): `LTTB` downsampling function.
- [#491](https://github.com/timescale/timescaledb-toolkit/pull/491), [#488](https://github.com/timescale/timescaledb-toolkit/pull/488): The arrow operators (->) of the accessor functions for `stats_agg`, `percentile_agg`, `counter_agg`, `gauge_agg` and `hyperloglog`. As an example, `average` accessor can now be used with `stats_agg` like this,
    ```SQL
    select location, 
        stats_agg(temperature) -> average() AS avg_temperature
    from conditions 
    group by location
    ```

#### Bug fixes

- [#465](https://github.com/timescale/timescaledb-toolkit/pull/465): Off by one error in state_agg interpolate.

#### Other notable changes

- Fix an issue where the 1.9.0 release unintentionally identified the toolkit extension version as 1.10.0-dev in the postgresql control file.
- [#467](https://github.com/timescale/timescaledb-toolkit/pull/467): Document supported platforms in Readme.
- [#463](https://github.com/timescale/timescaledb-toolkit/pull/463): Use pg14 as an example for instructions instead of pg13. Add reference to deb and rpm packages.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.8.0...1.10.1

## [1.9.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.9.0) (2022-08-16)

**An incorrect version (1.10.0-dev) was used which can cause upgrade failures. Not made GA.**

## [1.8.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.8.0) (2022-07-05)

#### New experimental features

- [#454](https://github.com/timescale/timescaledb-toolkit/pull/454): Saturating Math for i32/integers:
    - `saturating_add`
    - `saturating_add_pos`
    - `saturating_sub`
    - `saturating_sub_pos`
    - `saturating_mul`
- [#456](https://github.com/timescale/timescaledb-toolkit/pull/456): Adding interpolating accessors:
    - `interpolated_duration_in` to `state_agg`, 
    - `interpolated_average` to `time_weight`, `interpolated_delta`
    - `interpolated_rate` to `counter_agg` and `gauge_agg`.
- [#388](https://github.com/timescale/timescaledb-toolkit/pull/388): Create Count-Min Sketch crate.
- [#459](https://github.com/timescale/timescaledb-toolkit/pull/459): Add a convenient `approx_count_distinct` function which internally uses hyperloglog with a default bucket size of 2^15.
- [#458](https://github.com/timescale/timescaledb-toolkit/pull/458): Add `count_min_sketch` aggregate and `approx_count` accessor.
- [#434](https://github.com/timescale/timescaledb-toolkit/pull/434): Initial changes to support aarch64-unknown-linux-gnu.

#### Bug fixes

- [#429](https://github.com/timescale/timescaledb-toolkit/pull/429): Support explicit NULL values in timevectors.
- [#441](https://github.com/timescale/timescaledb-toolkit/pull/441): Relax tolerance in UDDSketch merge assertions.
- [#444](https://github.com/timescale/timescaledb-toolkit/pull/444): Fix default collation deserialization.

#### Other notable changes

- [#451](https://github.com/timescale/timescaledb-toolkit/pull/451): Improve error message for HyperLogLog.
- [#417](https://github.com/timescale/timescaledb-toolkit/pull/417): Include both pgx 0.2.x and pgx 0.4.x in CI image.
- [#416](https://github.com/timescale/timescaledb-toolkit/pull/416): Prepare for the 1.8.0 cycle.
- [#418](https://github.com/timescale/timescaledb-toolkit/pull/418): Made update-tester require two versions of cargo-pgx.
- [#421](https://github.com/timescale/timescaledb-toolkit/pull/421): Don't install pgx as root or under "/".
- [#427](https://github.com/timescale/timescaledb-toolkit/pull/427): Fix failing update-tester in CI.
- [#428](https://github.com/timescale/timescaledb-toolkit/pull/428): Update github cache keys.
- [#430](https://github.com/timescale/timescaledb-toolkit/pull/430): Lock pgx versions all the way.
- [#408](https://github.com/timescale/timescaledb-toolkit/pull/408): Upgrade to pgx 0.4.5.
- [#436](https://github.com/timescale/timescaledb-toolkit/pull/436): Change which cargo-pgx subcommand is added to PATH in CI image.
- [#432](https://github.com/timescale/timescaledb-toolkit/pull/432): Remove PATH hack in tools/build script.
- [#437](https://github.com/timescale/timescaledb-toolkit/pull/437): GitHub Actions improvements.
- [#448](https://github.com/timescale/timescaledb-toolkit/pull/448): Run clippy GitHub Actions job without qualification.
- [#446](https://github.com/timescale/timescaledb-toolkit/pull/446): Update README.md.
- [#414](https://github.com/timescale/timescaledb-toolkit/pull/414): Specify Ubuntu 20.04 instead of 'latest' in github configuration.

#### Shout-outs

- @tyhoff for reporting UDDSketch assertion error [#396](https://github.com/timescale/timescaledb-toolkit/issues/396).
- @hardikm10 for reporting hyperloglog deserialization issue [#443](https://github.com/timescale/timescaledb-toolkit/issues/443).

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.7.0...1.8.0

## [1.7.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.7.0) (2022-05-10)

#### New experimental features

- [#389](https://github.com/timescale/timescaledb-toolkit/pull/389): Create typed specialization for `freq_agg` and `topn_agg`.

#### Bug fixes

- [#378](https://github.com/timescale/timescaledb-toolkit/pull/378): Return INTERVAL from `duration_in(TEXT, StateAgg)` instead of `i64`.
- [#379](https://github.com/timescale/timescaledb-toolkit/pull/379): Handle NULL output from our aggregates: `asap`, `counter_agg`, `freq_agg`, `gauge_agg`, `hyperloglog`, `lttb`, `stats_agg`, `tdigest`, `uddsketch`.

#### Other notable changes
- [#367](https://github.com/timescale/timescaledb-toolkit/pull/367): Switch stabilization tests to new info, meaning that there's one central location for stabilization info.
- [#372](https://github.com/timescale/timescaledb-toolkit/pull/372): Improve tools/build flexibility for local builds.
- [#394](https://github.com/timescale/timescaledb-toolkit/pull/394): Copy almost all the counter_agg functions for gauge_agg.
- [#395](https://github.com/timescale/timescaledb-toolkit/pull/395): Remove GUC as they are no longer needed.
- [#399](https://github.com/timescale/timescaledb-toolkit/pull/399): Allow manual packaging.
- [#405](https://github.com/timescale/timescaledb-toolkit/pull/405): Update CI to rust 1.60.
- [#407](https://github.com/timescale/timescaledb-toolkit/pull/407): Update postgres versions in ci Dockerfile.
- [#409](https://github.com/timescale/timescaledb-toolkit/pull/409): Make dependencies version explicit in our CI image.
- [#404](https://github.com/timescale/timescaledb-toolkit/pull/404): Refactor TimeVector to greatly simplify structure.
- [#412](https://github.com/timescale/timescaledb-toolkit/pull/412): Allow building CI image in Actions.
- [#411](https://github.com/timescale/timescaledb-toolkit/pull/411), [#413](https://github.com/timescale/timescaledb-toolkit/pull/413): Create reportpackagingfailures.yml for reporting packaging failures not from CI builds.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.6.0...1.7.0

## [1.6.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.6.0) (2022-03-29)

#### New experimental features

- [#330](https://github.com/timescale/timescaledb-toolkit/pull/330): Add serialization for FrequencyTransState.
- [#368](https://github.com/timescale/timescaledb-toolkit/pull/368): Add `into_values` function for `state_agg`.
- [#370](https://github.com/timescale/timescaledb-toolkit/pull/370): Add a `topn (topn_agg)` variant of `freq_agg`, which is more convenient to use.
- [#375](https://github.com/timescale/timescaledb-toolkit/pull/375): Add `gauge_agg` and associated accessor functions `delta`, `idelta_left`, `idelta_right`, and the `rollup` function.

#### Other notable changes

- [#332](https://github.com/timescale/timescaledb-toolkit/pull/332): Speed up builds by fixing github action cache and cargo build cache.
- [#377](https://github.com/timescale/timescaledb-toolkit/pull/377): Stop auto building _nightly_ image.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.5.2...1.6.0

## [1.5.2](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.5.2) (2022-03-07)

**HIGH PRIORITY SECURITY UPDATE**.

#### Bug fixes

- There's a vulnerability in Toolkit 1.5 and earlier due to the fact that it creates a PLPGSQL function using CREATE OR REPLACE and without properly locking down the search path. This means that a user could pre-create the trigger function to run arbitrary code. To fix this we remove the trigger entirely; it no longer pulls its weight. This fix locks down our update scripts to only use CREATE OR REPLACE when actually necessary; while we don't yet have an exploit for the other functions, it would be unsurprising if one exists.
- [#351](https://github.com/timescale/timescaledb-toolkit/pull/351): Make serialize functions strict to handle NULL values in partitioned aggregates.

#### Shout-outs

- @svenklemm for reporting the vulnerability.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.5.0...1.5.2

## [1.5.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.5.0) (2022-01-31)

**The first version that unifies the community build with Timescale Cloud build.**

#### New experimental features

- `freq_agg` for estimating the most common elements in a column.
- `state_agg` for measuring the total time spent in different states.

#### Other notable changes

- Enforce clippy linting.
- Update rust to 1.57.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.4.0...1.5.0

## [1.4.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.4.0), [1.4.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.4.0-cloud) (2021-11-17)

#### Stabilized features

- Postgres 14 support.

#### Other notable changes

- Upgrade pgx to 0.2.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.3.1...1.4.0-cloud

## [1.3.1](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.3.1), [1.3.1-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.3.1-cloud) (2021-10-27)

#### Stabilized features

- Postgres 14 support.

#### Other notable changes

- Upgrade pgx to 0.2.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.3.0...1.3.1-cloud

## [1.3.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.3.0), [1.3.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.3.0-cloud) (2021-10-18)

#### New experimental features

- `timevector` function pipelines: a compact and more readable way to perform a sequence of analytic operations such as the following one,
    ```
    timevector(ts, val) -> sort() -> delta() -> abs() -> sum()
    ```
- `->` accessor for Toolkit types enables syntax like `stats_agg(data) -> average()`.
- `to_epoch()` wrapper for `extract ('EPOCH' FROM timestamp)` that makes it work more like an inverse of `to_timestamp(DOUBLE PRECISION)`.
#### Stabilized features

- `counter_agg` helper functions for Prometheus-style resetting monotonic counters.
- `hyperloglog` efficient approximate COUNT DISTINCT.
- `stats_agg` two-step aggregate for common statistics.

#### Other notable changes

- This release changes the textual I/O format for Toolkit types. We are uncertain if we will need to do so again in the future. Due to this we currently only support dump/restore within a single version of the extension.

#### Shout-outs

- @jonatas for the contribution [#237](https://github.com/timescale/timescaledb-toolkit/pull/237).
- @burmecia for the contribution [#251](https://github.com/timescale/timescaledb-toolkit/pull/251).

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.2.0...1.3.0-cloud

## [1.2.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.2.0), [1.2.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.2.0-cloud) (2021-09-14)

#### New experimental features

- Refinements to `hyperloglog` including a function to report relative error and fixing the functionality of `rollup`.
- Introduction of a `topn` approximation API. Presently this will only work for integer data, but expect to see further refinements that greatly expand this behavior.
- New `map_series` and `map_data` pipeline elements for the time series API that allow users to provide custom transforms of their time series data. Additionally introduced a `|>>` pipeline operator for an even more streamlined interface into the new mapping functionality.

#### Bug fixes

- Make a pass through all toolkit functions to correctly label behavior as immutable and parallel safe. This should improve the optimizations Postgres can apply to toolkit plans, particularly when run in a Timescale multinode cluster.
- Improve handling of internal data structures to reduce extraneous copies of data.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.1.0...1.2.0-cloud

## [1.1.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.1.0), [1.1.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.1.0-cloud) (2021-08-04)

#### New experimental features

- `hyperloglog` has been updated to use Hyperloglog++ under the hood. This does not change the user-facing API but should improve the accuracy of hyperloglog() estimates. This is the last major change expected for hyperloglog() and is now a candidate for stabilization pending user feedback.
- We've started experimenting with the pipeline API. While it's still very much a work in progress, it's at a point where the high-level concepts should be understandable. For example, a pipeline that outputs the daily change of a set of data, interpolating away any gaps in daily data, could look like
    ```
    SELECT timeseries(time, val)
        |> sort()
        |> resample_to_rate('trailing_average', '24 hours', true)
        |> fill_holes('interpolate')
        |> delta()
    FROM ...
    ```
    It's still early days for this API and it is not yet polished, but we would love feedback about its direction.

#### Bug fixes

- Fix a small memory leak in aggregation functions. This could have leaked ≈8 bytes per aggregate call.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/1.0.0...1.1.0-cloud

## [1.0.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.0.0), [1.0.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.0.0-cloud) (2021-07-12)

**This release renames the extension to `TimescaleDB Toolkit` from Timescale Analytics and starts stabilizing functionality.**

#### New experimental features

- `stats_agg()` eases the analysis of more sophisticated bucketed statistics, such as rolling averages. (Docs are forthcoming; until then feel free to peruse the design discussion doc.)
- `timeseries` which will serve as a building block for many pipelines, and unifies the output of lttb and ASAP.

#### Stabilized features

- Percentile-approximation algorithms including `percentile_agg()`, `uddsketch()` and `tdigest()` along with their associated functions. These are especially useful for computing percentiles in continuous aggregates.
- [Time-weighted average](https://github.com/timescale/timescaledb-toolkit/blob/main/docs/time_weighted_average.md) along with its associated functions. This eases taking the average over an irregularly spaced dataset that only includes changepoints.

#### Other notable changes

- The on-disk layout of `uddsketch` has been reworked to store buckets compressed. This can result in an orders-of-magnitude reduction in its storage requirements.
- The textual format of `uddsketch` has been reworked to be more readable.
- Functions that take in a `uddsketch` or `tdigest` have been reworked to be 0-copy when applicable, improving the performance of such functions by 10-100x.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/0.3.0...1.0.0-cloud

## [0.3.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/0.3.0), [0.3.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/0.3.0-cloud) (2021-06-17)

#### Other notable changes

- Internal improvements.
- Largely prep work for the upcoming 1.0 release.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/0.2.0...0.3.0-cloud

## [0.2.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/0.2.0) (2021-04-08), [0.2.0-cloud](https://github.com/timescale/timescaledb-toolkit/releases/tag/0.2.0-cloud) (2021-04-29)

#### New experimental features

- ASAP Smoothing (`asap_smooth`) – A graph smoothing algorithm that highlights changes.
- Counter Aggregates (`counter_agg`) – Tools to ease working with reset-able counters.
- Largest Triangle Three Buckets (`lttb`) – A downsampling algorithm that tries to preserve visual similarity.
- Time Bucket Range – A version of `time_bucket()` that outputs the [start, end) times of the bucket.
- Update `UddSketch` with an aggregate that merges multiple `UddSketch`es and various internal improvements.

**Full Changelog**: https://github.com/timescale/timescaledb-toolkit/compare/0.1.0...0.2.0-cloud

## [0.1.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/0.1.0) (2021-03-03)

#### New experimental features

- `hyperloglog` – An approximate COUNT DISTINCT based on hashing that provides reasonable accuracy in constant space.
- `tdigest` – A quantile estimate sketch optimized to provide more accurate estimates near the tails (i.e. 0.001 or 0.995) than conventional approaches.
- `uddsketch` – A quantile estimate sketch which provides a guaranteed maximum relative error.
- Time-weighted average (`time_weight`) – A time-weighted averaging function to determine the value of things proportionate to the time they are set.

#### Stabilized features

- None. All features are experimental.


================================================
FILE: LICENSE
================================================
Unless otherwise stated, source code in this repository, and any binaries built from
this source code, in whole or in part, are licensed under the
Timescale License (the "License"). You may not use these files except in
compliance with the License.

You may obtain a copy of the License at

   https://github.com/timescale/timescaledb/blob/master/tsl/LICENSE-TIMESCALE


================================================
FILE: NOTICE
================================================
TimescaleDB-Toolkit (TM)

Copyright (c) 2021-2024  Timescale, Inc. All Rights Reserved.

Unless otherwise stated, source code in this repository, and any binaries built
from this source code, in whole or in part, are licensed under the
Timescale License (the "License"). You may not use these files except in
compliance with the License.

You may obtain a copy of the License at

   https://github.com/timescale/timescaledb/blob/master/tsl/LICENSE-TIMESCALE


================================================
FILE: Readme.md
================================================
[![CI](https://github.com/timescale/timescaledb-toolkit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/timescale/timescaledb-toolkit/actions/workflows/ci.yml)

# TimescaleDB Toolkit

This repository is the home of the TimescaleDB Toolkit team. Our mission is to
ease all things analytics when using TimescaleDB, with a particular focus on
developer ergonomics and performance. Our issue tracker contains more
on [the features we're planning to work on](https://github.com/timescale/timescaledb-toolkit/labels/proposed-feature)
and [the problems we're trying to solve](https://github.com/timescale/timescaledb-toolkit/labels/feature-request).

Documentation for this version of the TimescaleDB Toolkit extension can be found
in this repository at [`docs`](https://github.com/timescale/timescaledb-toolkit/tree/main/docs).
The release history can be found on this repo's [GitHub releases](https://github.com/timescale/timescaledb-toolkit/releases).

## 🖥 Try It Out

The extension comes pre-installed on all [Tiger Cloud](https://www.tigerdata.com/cloud) instances and also on our full-featured [`timescale/timescaledb-ha` docker image](https://hub.docker.com/r/timescale/timescaledb-ha).

If DEB and RPM packages are a better fit for your situation, refer to the [Install Toolkit on self-hosted TimescaleDB](https://docs.timescale.com/self-hosted/latest/tooling/install-toolkit/#install-toolkit-on-self-hosted-timescaledb) how-to guide for further instructions on installing the extension via your package manager.

All versions of the extension contain experimental features in the `toolkit_experimental` schema. See [our docs section on experimental features](/docs/README.md#tag-notes) for more details.

## 💿 Installing From Source

### Supported platforms

The engineering team regularly tests the extension on the following platforms:

- x86_64-unknown-linux-gnu (Ubuntu Linux 24.04) (tested prior to every merge)
- aarch64-unknown-linux-gnu (Ubuntu Linux 24.04) (tested at release time)
- x86_64-apple-darwin (MacOS 12) (tested frequently on eng workstation)
- aarch64-apple-darwin (MacOS 12) (tested frequently on eng workstation)

As for other platforms: patches welcome!

### 🔧 Tools Setup

Building the extension requires valid [rust](https://www.rust-lang.org/) (we build and test on 1.65), [rustfmt](https://github.com/rust-lang/rustfmt), and clang installs, along with the postgres headers for whichever version of postgres you are running, and pgrx.
We recommend installing rust using the [official instructions](https://www.rust-lang.org/tools/install):

```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```

Install build tools and the postgres headers in the preferred manner for your system. You may also need to install OpenSSL.
For Ubuntu you can follow the [postgres install instructions](https://www.postgresql.org/download/linux/ubuntu/) then run

```bash
sudo apt-get install make gcc pkg-config clang postgresql-server-dev-14 libssl-dev
```

Next you need [cargo-pgrx](https://github.com/tcdi/pgrx), which can be installed with

```bash
cargo install --version '=0.16.1' --force cargo-pgrx
```

You must reinstall cargo-pgrx whenever you update your Rust compiler, since cargo-pgrx needs to be built with the same compiler as Toolkit.

Finally, setup the pgrx development environment with

```bash
cargo pgrx init --pg14 pg_config
```

Installing from source is also available on macOS and requires the same set of prerequisites and set up commands listed above.

### 💾 Building and Installing the extension

Download or clone this repository, and switch to the `extension` subdirectory, e.g.

```bash
git clone https://github.com/timescale/timescaledb-toolkit && \
cd timescaledb-toolkit/extension
```

Then run

```
cargo pgrx install --release && \
cargo run --manifest-path ../tools/post-install/Cargo.toml -- pg_config
```

To initialize the extension after installation, enter the following into `psql`:

```
CREATE EXTENSION timescaledb_toolkit;
```

## ✏️ Get Involved

We appreciate your help in shaping the project's direction! Have a look at the
[list of features we're thinking of working on](https://github.com/timescale/timescaledb-toolkit/labels/proposed-feature)
and feel free to comment on the features or expand the list.

### 🔨 Testing

See above for prerequisites and installation instructions.

You can run tests against a postgres version `pg15`, `pg16`, `pg17`, or `pg18` using

```
cargo pgrx test ${postgres_version}
```

## Learn about Tiger Data

Tiger Data is the fastest PostgreSQL for transactional, analytical, and agentic workloads. To learn more about the company and its products, visit [tigerdata.com](https://www.tigerdata.com).

================================================
FILE: crates/aggregate_builder/Cargo.toml
================================================
[package]
name = "aggregate_builder"
version = "0.1.0"
edition = "2018"

[lib]
proc-macro = true

[dependencies]
syn = {version="1.0", features=["extra-traits", "visit", "visit-mut", "full"]}
quote = "1.0"
proc-macro2 = "1.0"

[features]
print-generated = []


================================================
FILE: crates/aggregate_builder/Readme.md
================================================
# Aggregate Builder #

Library for building Postgres [aggregate functions](https://www.postgresql.org/docs/current/xaggr.html)
that imitates
[`CREATE AGGREGATE`](https://www.postgresql.org/docs/current/sql-createaggregate.html).

## Syntax ##

Current syntax looks something like

```rust
#[aggregate] impl aggregate_name {
    type State = InternalTransitionType;

    fn transition(
        state: Option<State>,
        #[sql_type("sql_type")] argument: RustType, // can have an arbitrary number of args
    ) -> Option<State> {
        // transition function function body goes here
    }

    fn finally(state: Option<&mut State>) -> Option<ResultType> {
        // final function function body goes here
    }

    // the remaining items are optional

    // parallel-safety marker if desirable
    const PARALLEL_SAFE: bool = true;

    fn serialize(state: &State) -> bytea {
        // serialize function body goes here
    }

    fn deserialize(bytes: bytea) -> State {
        // deserialize function body goes here
    }

    fn combine(state1: Option<&State>, state2: Option<&State>) -> Option<State> {
        // combine function body goes here
    }
}
```

All items except for `type State`, `fn transition()`, and `fn finally()` are
optional. The SQL for the aggregate and its functions will be created
automatically, and any necessary memory context switching is handled
automatically for most cases¹.

¹It will switch to the aggregate memory context before calling the transition
function body and the combine function body. Looking through `array_agg()`'s
code this seems to be the correct places to do so. Note that if you want to
allocate in the aggregate memory context in the final function other work may
be needed.

## Example ##

Below is a complete example of an `anything()` aggregate that returns one of
the aggregated values.

```rust
#[aggregate] impl anything {
    type State = String;

    fn transition(
        state: Option<State>,
        #[sql_type("text")] value: String,
    ) -> Option<State> {
        state.or(Some(value))
    }

    fn finally(state: Option<&State>) -> Option<String> {
        state.as_deref().cloned()
    }
}
```

## Expansion ##

Ignoring some supplementary type checking we add to improve error messages, the
macro expands aggregate definitions to rust code something like the following
(explanations as comments in-line)
```rust
// we nest things within a module to mimic the namespacing of an `impl` block
pub mod aggregate_name {
    // glob import to further act like an `impl`
    use super::*;

    pub type State = String;

    // PARALLEL_SAFE constant in case someone wants to use it
    // unlikely to be actually used in practice
    #[allow(dead_code)]
    pub const PARALLEL_SAFE: bool = true;

    #[pgrx::pg_extern(immutable, parallel_safe)]
    pub fn aggregate_name_transition_fn_outer(
        __inner: pgrx::Internal,
        value: RustType,
        __fcinfo: pg_sys::FunctionCallInfo,
    ) -> Option<Internal> {
        use crate::palloc::{Inner, InternalAsValue, ToInternal};
        unsafe {
            // Translate from the SQL type to the rust one
            // we actually store an `Option<State>` rather than a `State`.
            let mut __inner: Option<Inner<Option<State>>> = __inner.to_inner();
            // We steal the state out from under the pointer leaving `None` in
            // its place. This means that if the inner transition function
            // panics the inner transition function will free `State` while the
            // teardown hook in the aggregate memory context will only free inner
            let inner: Option<State> = match &mut __inner {
                None => None,
                Some(inner) => Option::take(&mut **inner),
            };
            let state: Option<State> = inner;
            // Switch to the aggregate memory context. This ensures that the
            // transition state lives for as long as the aggregate, and that if
            // we allocate from Postgres within the inner transition function
            // those too will stay around.
            crate::aggregate_utils::in_aggregate_context(__fcinfo, || {
                // call the inner transition function
                let result = transition(state, value);

                // return the state to postgres, if we have a pointer just store
                // in that, if not allocate one only if needed.
                let state: Option<State> = result;
                __inner = match (__inner, state) {
                    (None, None) => None,
                    (None, state @ Some(..)) => Some(state.into()),
                    (Some(mut inner), state) => {
                        *inner = state;
                        Some(inner)
                    }
                };
                __inner.internal()
            })
        }
    }
    pub fn transition(state: Option<State>, value: String) -> Option<State> {
        // elided
    }

    #[pgrx::pg_extern(immutable, parallel_safe)]
    pub fn aggregate_name_finally_fn_outer(
        __internal: pgrx::Internal,
        __fcinfo: pg_sys::FunctionCallInfo,
    ) -> Option<String> {
        use crate::palloc::InternalAsValue;
        unsafe {
            // Convert to the rust transition type, see the comment in the
            // transition function for why we store an `Option<State>`
            let mut input: Option<Inner<Option<State>>> = __internal.to_inner();
            let input: Option<&mut State> = input.as_deref_mut()
                .map(|i| i.as_mut())
                .flatten();
            // We pass in an `Option<&mut State>`; `Option<>` because the
            // transition state might not have been initialized yet;
            // `&mut State` since while the final function has unique access to
            // the transition function it must leave it a valid state when it's
            // finished
            let state: Option<&mut State> = input;
            finally(state)
        }
    }
    pub fn finally(state: Option<&mut State>) -> Option<String> {
        // elided
    }

    #[pgrx::pg_extern(strict, immutable, parallel_safe, schema = "toolkit_experimental")]
    pub fn aggregate_name_serialize_fn_outer(__internal: pgrx::Internal) -> bytea {
        use crate::palloc::{Inner, InternalAsValue};
        // Convert to the rust transition type, see the comment in the
        // transition function for why we store an `Option<State>`
        let input: Option<Inner<Option<State>>> = unsafe { __internal.to_inner() };
        let mut input: Inner<Option<State>> = input.unwrap();
        // We pass by-reference for the same reason as the final function.
        // Note that _technically_ you should not mutate in the serialize,
        // function though there are cases you can get away with it when using
        // an `internal` transition type.
        let input: &mut State = input.as_mut().unwrap();
        let state: &State = input;
        serialize(state)
    }
    pub fn serialize(state: &State) -> bytea {
        // elided
    }

    #[pgrx::pg_extern(strict, immutable, parallel_safe, schema = "toolkit_experimental")]
    pub fn aggregate_name_deserialize_fn_outer(
        bytes: crate::raw::bytea,
        _internal: Internal,
    ) -> Option<Internal> {
        use crate::palloc::ToInternal;
        let result = deserialize(bytes);
        let state: State = result;
        // Convert to the rust transition type, see the comment in the
        // transition function for why we store an `Option<State>`.
        // We deliberately don't switch to the aggregate transition context
        // because the postgres aggregates do not do so.
        let state: Inner<Option<State>> = Some(state).into();
        unsafe { Some(state).internal() }
    }
    pub fn deserialize(bytes: crate::raw::bytea) -> State {
        // elided
    }

    #[pgrx::pg_extern(immutable, parallel_safe, schema = "toolkit_experimental")]
    pub fn aggregate_name_combine_fn_outer(
        a: Internal,
        b: Internal,
        __fcinfo: pg_sys::FunctionCallInfo,
    ) -> Option<Internal> {
        use crate::palloc::{Inner, InternalAsValue, ToInternal};
        unsafe {
            // Switch to the aggregate memory context. This ensures that the
            // transition state lives for as long as the aggregate, and that if
            // we allocate from Postgres within the inner transition function
            // those too will stay around.
            crate::aggregate_utils::in_aggregate_context(__fcinfo, || {
                let result = combine(a.to_inner().as_deref(), b.to_inner().as_deref());
                let state: Option<State> = result;
                let state = match state {
                    None => None,
                    state @ Some(..) => {
                        let state: Inner<Option<State>> = state.into();
                        Some(state)
                    }
                };
                state.internal()
            })
        }
    }
    pub fn combine(a: Option<&State>, b: Option<&State>) -> Option<State> {
        // elided
    }

    // SQL generated for the aggregate
    pgrx::extension_sql!("\n\
        CREATE AGGREGATE toolkit_experimental.aggregate_name (value RustType) (\n\
            stype = internal,\n\
            sfunc = toolkit_experimental.aggregate_name_transition_fn_outer,\n\
            finalfunc = toolkit_experimental.aggregate_name_finally_fn_outer,\n\
            parallel = safe,\n\
            serialfunc = toolkit_experimental.aggregate_name_serialize_fn_outer,\n\
            deserialfunc = toolkit_experimental.aggregate_name_deserialize_fn_outer,\n\
            combinefunc = toolkit_experimental.aggregate_name_combine_fn_outer\n\
        );\n",
        name = "aggregate_name_extension_sql",
        requires = [
            aggregate_name_transition_fn_outer,
            aggregate_name_finally_fn_outer,
            aggregate_name_serialize_fn_outer,
            aggregate_name_deserialize_fn_outer,
            aggregate_name_combine_fn_outer,
        ],
    );
}
```

================================================
FILE: crates/aggregate_builder/src/lib.rs
================================================
use std::borrow::Cow;

use proc_macro::TokenStream;

use proc_macro2::{Span, TokenStream as TokenStream2};

use quote::{quote, quote_spanned};

use syn::{
    parse::{Parse, ParseStream},
    parse_macro_input, parse_quote,
    punctuated::Punctuated,
    spanned::Spanned,
    token::Comma,
    Token,
};

#[proc_macro_attribute]
pub fn aggregate(_attr: TokenStream, item: TokenStream) -> TokenStream {
    // Turn the annotated `impl` block into our `Aggregate` AST, then expand
    // it into the pgrx wrapper functions plus the CREATE AGGREGATE SQL.
    let aggregate = parse_macro_input!(item as Aggregate);
    let generated = expand(aggregate);
    // Debug aid: dump the generated code when the feature flag is enabled.
    if cfg!(feature = "print-generated") {
        println!("{}", generated);
    }
    generated.into()
}

//
// Parser
//

// like ItemImpl except that we allow `name: Type "SqlType"` for `fn transition`
//
// Parsed representation of an `#[aggregate] impl ...` block: the aggregate's
// (optionally schema-qualified) name, its transition-state type, and the
// functions that make it up. Only the transition and final functions are
// mandatory; the rest mirror the optional clauses of `CREATE AGGREGATE`.
struct Aggregate {
    // optional schema from `impl schema::name`
    schema: Option<syn::Ident>,
    name: syn::Ident,

    // the `type State = ...;` item: the Rust transition-state type
    state_ty: AggregateTy,

    // value of `const PARALLEL_SAFE: bool = ...;`, when present
    parallel_safe: Option<syn::LitBool>,

    transition_fn: AggregateFn,
    final_fn: AggregateFn,

    // optional functions used for partial/parallel aggregation
    serialize_fn: Option<AggregateFn>,
    deserialize_fn: Option<AggregateFn>,
    combine_fn: Option<AggregateFn>,
}

// A single item inside the `impl` body: the `type State` declaration, one of
// the aggregate functions, or the `const PARALLEL_SAFE` marker.
enum AggregateItem {
    State(AggregateTy),
    Fn(AggregateFn),
    ParallelSafe(AggregateParallelSafe),
}

// A `type <ident> = <ty>;` item, e.g. `type State = String;`
struct AggregateTy {
    ident: syn::Ident,
    ty: Box<syn::Type>,
}

// The parsed value of a `const PARALLEL_SAFE: bool = <value>;` item.
struct AggregateParallelSafe {
    value: syn::LitBool,
}

// One function item from the impl body
// (transition/finally/serialize/deserialize/combine).
struct AggregateFn {
    ident: syn::Ident,
    // optional `#[sql_name("...")]` override for the generated SQL name
    sql_name: Option<syn::LitStr>,
    // span of the argument parentheses, used for error reporting
    parens: syn::token::Paren,
    // declared arguments, excluding any `FunctionCallInfo` argument
    args: Punctuated<AggregateArg, Comma>,
    ret: syn::ReturnType,
    body: syn::Block,
    // a `FunctionCallInfo` argument, if declared; kept separate because it
    // is pgrx plumbing rather than a SQL-visible argument
    fcinfo: Option<AggregateArg>,
}

// A single function argument: the Rust pattern/type plus the optional
// `#[sql_type("...")]` literal naming its SQL-visible type.
#[derive(Clone)]
struct AggregateArg {
    rust: syn::PatType,
    sql: Option<syn::LitStr>,
}

// Return early from the surrounding function with a `syn::Error` located at
// `$span`. The first form formats `$fmt` with the given arguments; the
// second uses `$msg` verbatim.
macro_rules! error {
    ($span: expr, $fmt: literal, $($arg:expr),* $(,)?) => {
        return Err(syn::Error::new($span, format!($fmt, $($arg),*)))
    };
    ($span: expr, $msg: literal) => {
        return Err(syn::Error::new($span, $msg))
    };
}

// Raise a parse error at `$span` when `$val` (an `Option`) is already set,
// i.e. the user wrote the same aggregate item twice. `$name` is the
// human-readable name of the item, e.g. "`fn transition`".
macro_rules! check_duplicate {
    ($val: expr, $span:expr, $name: expr) => {
        if $val.is_some() {
            // Bug fix: `$name` was previously dropped, so the compiler error
            // was the literal string "duplicate {}"; pass it through to
            // `error!` so the message names the duplicated item.
            error!($span, "duplicate {}", $name)
        }
    };
}

impl Parse for Aggregate {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let _: Token![impl] = input.parse()?;

        let first_path_segment = input.parse()?;
        let (schema, name): (_, syn::Ident) = if input.peek(Token![::]) {
            let _: Token![::] = input.parse()?;
            (Some(first_path_segment), input.parse()?)
        } else {
            (None, first_path_segment)
        };

        let body;
        let _brace_token = syn::braced!(body in input);
        let mut state_ty = None;

        let mut parallel_safe = None;

        let mut fns: Vec<AggregateFn> = vec![];
        while !body.is_empty() {
            use AggregateItem::*;
            let item = body.parse()?;
            match item {
                State(ty) => {
                    if ty.ident != "State" {
                        error!(
                            ty.ident.span(),
                            "unexpected `type {}`, expected `State`", ty.ident
                        )
                    }
                    if state_ty.is_some() {
                        error!(ty.ident.span(), "duplicate `type State`")
                    }
                    state_ty = Some(ty);
                }
                ParallelSafe(safe) => parallel_safe = Some(safe.value),
                Fn(f) => {
                    fns.push(f);
                }
            }
        }

        let mut transition_fn = None;
        let mut final_fn = None;
        let mut serialize_fn = None;
        let mut deserialize_fn = None;
        let mut combine_fn = None;
        for f in fns {
            if f.ident == "transition" {
                check_duplicate!(transition_fn, f.ident.span(), "`fn transition`");
                if f.args.is_empty() {
                    error!(
                        f.parens.span,
                        "transition function must have at least one argument"
                    )
                }
                for arg in f.args.iter().skip(1) {
                    if arg.sql.is_none() {
                        error!(arg.rust.span(), "missing SQL type")
                    }
                }
                transition_fn = Some(f);
            } else if f.ident == "finally" {
                check_duplicate!(final_fn, f.ident.span(), "`fn finally`");
                if f.args.len() != 1 {
                    error!(
                        f.parens.span,
                        "final function must have at one argument of type `Option<Inner<State>>`"
                    )
                }
                if f.args[0].sql.is_some() {
                    error!(
                        f.args[0].sql.span(),
                        "should not have SQL type, will be inferred"
                    )
                }
                final_fn = Some(f);
            } else if f.ident == "serialize" {
                check_duplicate!(serialize_fn, f.ident.span(), "`fn serialize`");
                if f.args.len() != 1 {
                    error!(
                        f.parens.span,
                        "serialize function must have at one argument of type `Inner<State>`"
                    )
                }
                if f.args[0].sql.is_some() {
                    error!(
                        f.args[0].sql.span(),
                        "should not have SQL type, will be inferred"
                    )
                }
                serialize_fn = Some(f);
            } else if f.ident == "deserialize" {
                check_duplicate!(deserialize_fn, f.ident.span(), "`fn deserialize`");
                if f.args.len() != 1 {
                    error!(
                        f.parens.span,
                        "deserialize function must have at one argument of type `bytea`"
                    )
                }
                if f.args[0].sql.is_some() {
                    error!(
                        f.args[0].sql.span(),
                        "should not have SQL type, will be inferred"
                    )
                }
                deserialize_fn = Some(f);
            } else if f.ident == "combine" {
                check_duplicate!(combine_fn, f.ident.span(), "`fn combine`");
                if f.args.len() != 2 {
                    error!(f.parens.span, "deserialize function must have at one argument of type `Option<Inner<State>>`")
                }
                for arg in &f.args {
                    if arg.sql.is_some() {
                        error!(arg.sql.span(), "should not have SQL type, will be inferred")
                    }
                }
                combine_fn = Some(f)
            } else {
                error!(
                    f.ident.span(),
                    "unexpected `fn {}`, expected one of `transition`, `finally`, `serialize`, `deserialize`, or `combine`",
                    f.ident
                )
            }
        }

        let state_ty = match state_ty {
            Some(state_ty) => state_ty,
            None => error!(name.span(), "missing `type State = ...;`"),
        };

        let transition_fn = match transition_fn {
            Some(transition_fn) => transition_fn,
            None => error!(name.span(), "missing `fn transition`"),
        };

        let final_fn = match final_fn {
            Some(final_fn) => final_fn,
            None => error!(name.span(), "missing `fn final`"),
        };

        Ok(Aggregate {
            schema,
            name,
            state_ty,
            parallel_safe,
            transition_fn,
            final_fn,
            serialize_fn,
            deserialize_fn,
            combine_fn,
        })
    }
}

impl Parse for AggregateItem {
    // Dispatches on the first keyword of the next item. We peek on a fork so
    // that any outer attributes are skipped for the lookahead but still left
    // in `input` for the item parser that actually consumes them.
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let ahead = input.fork();
        let _ = ahead.call(syn::Attribute::parse_outer)?;
        let lookahead = ahead.lookahead1();
        if lookahead.peek(Token![fn]) {
            Ok(AggregateItem::Fn(input.parse()?))
        } else if lookahead.peek(Token![type]) {
            Ok(AggregateItem::State(input.parse()?))
        } else if lookahead.peek(Token![const]) {
            Ok(AggregateItem::ParallelSafe(input.parse()?))
        } else {
            Err(lookahead.error())
        }
    }
}

impl Parse for AggregateTy {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let _: Token![type] = input.parse()?;
        let ident = input.parse()?;
        let _: Token![=] = input.parse()?;
        let ty = Box::new(input.parse()?);
        let _: Token![;] = input.parse()?;
        Ok(Self { ident, ty })
    }
}

impl Parse for AggregateParallelSafe {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let _: Token![const] = input.parse()?;
        let name: syn::Ident = input.parse()?;
        if name != "PARALLEL_SAFE" {
            error!(
                name.span(),
                "unexpected const `{}` expected `PARALLEL_SAFE`", name
            )
        }
        let _: Token![:] = input.parse()?;
        let ty: syn::Ident = input.parse()?;
        if ty != "bool" {
            error!(ty.span(), "unexpected type `{}` expected `bool`", ty)
        }
        let _: Token![=] = input.parse()?;
        let value = input.parse()?;
        let _: Token![;] = input.parse()?;
        Ok(Self { value })
    }
}

// True when the argument's Rust type is a path mentioning `FunctionCallInfo`
// in any of its segments; such arguments are diverted to `AggregateFn::fcinfo`
// instead of being exposed as SQL aggregate arguments.
fn is_fcinfo(arg: &AggregateArg) -> bool {
    match &*arg.rust.ty {
        syn::Type::Path(path) => path
            .path
            .segments
            .iter()
            .any(|segment| segment.ident == "FunctionCallInfo"),
        _ => false,
    }
}

impl Parse for AggregateFn {
    // Parses a `fn name(args...) -> Ret { body }` item. Any argument whose
    // type mentions `FunctionCallInfo` is diverted into `fcinfo` rather than
    // `args`, since it is pgrx plumbing and not a SQL-visible argument.
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let mut attributes = input.call(syn::Attribute::parse_outer)?;
        let _: Token![fn] = input.parse()?;
        let ident = input.parse()?;

        let contents;
        let parens = syn::parenthesized!(contents in input);

        let mut args = Punctuated::new();
        let mut fcinfo = None;
        while !contents.is_empty() {
            let arg: AggregateArg = contents.parse()?;
            if is_fcinfo(&arg) {
                // NOTE(review): a second FunctionCallInfo argument would
                // silently overwrite the first here.
                fcinfo = Some(arg);
                if contents.is_empty() {
                    break;
                }
                // consume the comma after the fcinfo argument without
                // recording it in `args`
                let _comma: Token![,] = contents.parse()?;
                continue;
            }
            args.push(arg);
            if contents.is_empty() {
                break;
            }
            // keep the punctuation so `args` round-trips faithfully
            let comma: Token![,] = contents.parse()?;
            args.push_punct(comma);
        }

        let ret = input.parse()?;
        let body = input.parse()?;

        // an optional `#[sql_name("...")]` attribute overrides the name the
        // function is given in the generated SQL
        let expected_path = parse_quote!(sql_name);
        let sql_name = match take_attr(&mut attributes, &expected_path) {
            None => None,
            Some(attribute) => attribute.parse_args()?,
        };
        // any attribute other than `sql_name` is rejected
        if !attributes.is_empty() {
            error!(attributes[0].span(), "unexpected attribute")
        }
        Ok(Self {
            ident,
            sql_name,
            parens,
            args,
            ret,
            body,
            fcinfo,
        })
    }
}

impl Parse for AggregateArg {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let arg: syn::FnArg = input.parse()?;
        let mut rust = match arg {
            syn::FnArg::Typed(pat) => pat,
            _ => error!(arg.span(), "`self` is not a valid aggregate argument"),
        };
        let sql = {
            let expected_path = parse_quote!(sql_type);
            let attribute = take_attr(&mut rust.attrs, &expected_path);
            match attribute {
                None => None,
                Some(attribute) => attribute.parse_args()?,
            }
        };
        Ok(Self { rust, sql })
    }
}

/// Removes and returns the first attribute in `attrs` whose path equals
/// `path`, or `None` if no such attribute exists.
///
/// Uses `Iterator::position` + `?` instead of the more verbose
/// `enumerate().find()`-then-match formulation; behavior is unchanged.
fn take_attr(attrs: &mut Vec<syn::Attribute>, path: &syn::Path) -> Option<syn::Attribute> {
    let idx = attrs.iter().position(|a| &a.path == path)?;
    Some(attrs.remove(idx))
}

//
// Expander
//

/// Expands a parsed [`Aggregate`] into the final token stream: a module
/// named after the aggregate containing the user's inner functions, the
/// pgrx-wrapped outer functions, compile-time type checks, and the
/// `CREATE AGGREGATE` statement emitted via `pgrx::extension_sql!`.
fn expand(agg: Aggregate) -> TokenStream2 {
    use std::fmt::Write;
    let Aggregate {
        schema,
        name,
        state_ty,
        parallel_safe,
        transition_fn,
        final_fn,
        serialize_fn,
        deserialize_fn,
        combine_fn,
    } = agg;

    let state_ty = state_ty.ty;

    let transition_fns = transition_fn.transition_fn_tokens(&schema, &name);
    let final_fns = final_fn.final_fn_tokens(&schema, &name);

    // The extension SQL must be ordered after the functions it references;
    // each generated outer function is recorded here as a `requires=` entry.
    let mut extension_sql_reqs = vec![
        transition_fn.outer_ident(&name),
        final_fn.outer_ident(&name),
    ];

    let schema_qualifier = match &schema {
        Some(schema) => format!("{schema}."),
        None => String::new(),
    };
    // Build the `CREATE AGGREGATE` statement incrementally; later closures
    // (`parallel_safe`, `add_function`) append to `create` as a side effect.
    let mut create = format!("\nCREATE AGGREGATE {schema_qualifier}{name} (");
    for (i, (name, arg)) in transition_fn.sql_args().enumerate() {
        if i != 0 {
            let _ = write!(&mut create, ", ");
        }
        if let Some(name) = name {
            let _ = write!(&mut create, "{name} ");
        }
        let _ = write!(&mut create, "{arg}");
    }
    let transition_fn_ident = transition_fn.outer_ident(&name);
    let final_fn_ident = final_fn.outer_ident(&name);
    let _ = write!(
        &mut create,
        ") (\n    \
            stype = internal,\n    \
            sfunc = {schema_qualifier}{transition_fn_ident},\n    \
            finalfunc = {schema_qualifier}{final_fn_ident}"
    );

    // If `parallel_safe = ...;` was declared: append the `parallel =` clause
    // to the SQL, emit `compile_error!`s when safety is claimed without the
    // required serialize/deserialize/combine functions, and expose the value
    // as a `PARALLEL_SAFE` constant in the generated module.
    let parallel_safe = parallel_safe.map(|p| {
        let value = p.value();
        let _ = write!(
            &mut create,
            ",\n    parallel = {}",
            if value { "safe" } else { "unsafe" }
        );
        let serialize_fn_check = value
            .then(|| {
                serialize_fn.as_ref().is_none().then(|| {
                    quote_spanned!(p.span()=>
                        compile_error!("parallel safety requires a `fn serialize()` also");
                    )
                })
            })
            .flatten();
        let deserialize_fn_check = value
            .then(|| {
                deserialize_fn.as_ref().is_none().then(|| {
                    quote_spanned!(p.span()=>
                        compile_error!("parallel safety requires a `fn deserialize()` also");
                    )
                })
            })
            .flatten();
        let combine_fn_check = value
            .then(|| {
                combine_fn.as_ref().is_none().then(|| {
                    quote_spanned!(p.span()=>
                        compile_error!("parallel safety requires a `fn combine()` also");
                    )
                })
            })
            .flatten();
        quote_spanned!(p.span()=>
            #serialize_fn_check
            #deserialize_fn_check
            #combine_fn_check

            #[allow(dead_code)]
            pub const PARALLEL_SAFE: bool = #value;
        )
    });

    // Shared helper for the optional serialize/deserialize/combine
    // functions: registers the outer ident as a SQL dependency, appends the
    // `<field> = <fn>` clause to the statement, and generates the tokens.
    let mut add_function =
        |f: AggregateFn,
         field: &str,
         make_tokens: fn(&AggregateFn, &Option<syn::Ident>, &syn::Ident) -> TokenStream2| {
            extension_sql_reqs.push(f.outer_ident(&name));
            let _ = write!(
                &mut create,
                ",\n    {} = {}{}",
                field,
                schema_qualifier,
                f.outer_ident(&name)
            );
            make_tokens(&f, &schema, &name)
        };

    // The three pairwise `xor` checks below yield `Some` exactly when one of
    // the two functions is present and the other is missing; in that case a
    // `compile_error!` is emitted at the span of the one that exists.
    let serialize_fns_check = serialize_fn.as_ref().xor(deserialize_fn.as_ref()).map(|_| {
        let s = serialize_fn.as_ref().map(|f| {
            quote_spanned!(f.ident.span()=>
                compile_error!("`fn deserialize()` is also required");
            )
        });
        let d = deserialize_fn.as_ref().map(|f| {
            quote_spanned!(f.ident.span()=>
                compile_error!("`fn serialize()` is also required");
            )
        });
        quote!(#s #d)
    });

    let combine_fns_check1 = serialize_fn.as_ref().xor(combine_fn.as_ref()).map(|_| {
        let s = serialize_fn.as_ref().map(|f| {
            quote_spanned!(f.ident.span()=>
                compile_error!("`fn combine()` is also required");
            )
        });
        let c = combine_fn.as_ref().map(|f| {
            quote_spanned!(f.ident.span()=>
                compile_error!("`fn serialize()` is also required");
            )
        });
        quote!(#s #c)
    });

    let combine_fns_check2 = combine_fn.as_ref().xor(deserialize_fn.as_ref()).map(|_| {
        let s = combine_fn.as_ref().map(|f| {
            quote_spanned!(f.ident.span()=>
                compile_error!("`fn deserialize()` is also required");
            )
        });
        let d = deserialize_fn.as_ref().map(|f| {
            quote_spanned!(f.ident.span()=>
                compile_error!("`fn combine()` is also required");
            )
        });
        quote!(#s #d)
    });

    let serialize_fns =
        serialize_fn.map(|f| add_function(f, "serialfunc", AggregateFn::serialize_fn_tokens));
    let deserialize_fns =
        deserialize_fn.map(|f| add_function(f, "deserialfunc", AggregateFn::deserialize_fn_tokens));
    let combine_fns =
        combine_fn.map(|f| add_function(f, "combinefunc", AggregateFn::combine_fn_tokens));

    let _ = write!(&mut create, "\n);\n");

    let extension_sql_name = format!("{name}_extension_sql");

    // Everything lives in a module named after the aggregate so that
    // `State`, the counters module, and `PARALLEL_SAFE` do not collide
    // between aggregates.
    quote! {
        pub mod #name {
            use super::*;

            pub type State = #state_ty;

            #serialize_fns_check

            #combine_fns_check1

            #combine_fns_check2

            #parallel_safe

            #transition_fns

            #final_fns
            #serialize_fns
            #deserialize_fns
            #combine_fns

            pgrx::extension_sql!(
                #create,
                name=#extension_sql_name,
                requires=[#(#extension_sql_reqs),*],
            );
        }
    }
}

impl AggregateFn {
    /// Generates the aggregate's transition (state) function: compile-time
    /// checks that the inner function's first argument and return type are
    /// `Option<State>`, an outer `#[pg_extern]` wrapper that unwraps the
    /// `Internal` state pointer and runs in the aggregate memory context,
    /// and the user's inner function verbatim.
    fn transition_fn_tokens(
        &self,
        schema: &Option<syn::Ident>,
        aggregate_name: &syn::Ident,
    ) -> TokenStream2 {
        let outer_ident = self.outer_ident(aggregate_name);
        let Self {
            ident,
            args,
            body,
            ret,
            fcinfo,
            ..
        } = self;

        let schema = schema.as_ref().map(|s| {
            let s = format!("{s}");
            quote!(, schema = #s)
        });

        // First declared argument is the state; it is checked against
        // `Option<State>` and replaced by an `Internal` in the wrapper.
        let input_ty = &*args[0].rust.ty;

        let state_type_check = state_type_check_tokens(input_ty, Some(()));

        // Use the user's fcinfo argument if one was declared, otherwise
        // synthesize `__fcinfo` so the wrapper can still enter the
        // aggregate memory context.
        let fcinfo_arg = if let Some(fcinfo) = fcinfo {
            fcinfo.clone()
        } else {
            syn::parse_str::<AggregateArg>("__fcinfo: pg_sys::FunctionCallInfo").unwrap()
        };

        // Append the declared fcinfo arg to the inner function's signature,
        // preserving the list's trailing-comma state.
        let mut expanded_args = args.clone();
        if let Some(fcinfo) = fcinfo {
            let trailing = expanded_args.trailing_punct();
            if !trailing {
                expanded_args.push_punct(Comma::default());
            }
            expanded_args.push_value(fcinfo.clone());
            if trailing {
                expanded_args.push_punct(Comma::default());
            }
        }

        let fcinfo_ident = arg_ident(&fcinfo_arg);

        // Outer wrapper parameters: every declared arg except the state,
        // plus the fcinfo argument.
        let arg_signatures = args
            .iter()
            .chain(std::iter::once(&fcinfo_arg))
            .skip(1)
            .map(|arg| &arg.rust);

        // Values forwarded from the wrapper to the inner function (state is
        // passed separately as `#input_state_var`).
        let arg_vals: Punctuated<syn::Pat, Comma> =
            expanded_args.iter().skip(1).map(arg_ident).collect();

        let inner_arg_signatures = expanded_args.iter().map(|arg| &arg.rust);

        let return_type_check = state_type_check_tokens(&ret_type(ret), Some(()));

        // use different variables for these to ensure the type-check is called
        let input_var = syn::Ident::new("__inner", input_ty.span());
        let input_state_var = syn::Ident::new("state", input_ty.span());

        let input_type_check = quote_spanned!(input_ty.span()=>
            let inner: Option<State> = match &mut #input_var {
                None => None,
                Some(inner) => Option::take(&mut **inner),
            };
            let #input_state_var: #input_ty = inner;
        );

        // use different variables for these to ensure the type-check is called
        let result_var = syn::Ident::new("result", ret_type(ret).span());
        let state_var = syn::Ident::new("state", ret_type(ret).span());
        let result_type_check = quote_spanned!(state_var.span()=>
            let #state_var: Option<State> = #result_var;
        );

        quote! {
            #state_type_check
            #return_type_check

            #[pgrx::pg_extern(immutable, parallel_safe #schema)]
            pub fn #outer_ident(
                #input_var: pgrx::Internal,
                #(#arg_signatures,)*
            ) -> Option<Internal> {
                use crate::palloc::{Inner, InternalAsValue, ToInternal};
                unsafe {
                    let mut #input_var: Option<Inner<Option<State>>> = #input_var.to_inner();
                    #input_type_check
                    crate::aggregate_utils::in_aggregate_context(#fcinfo_ident, || {
                        let #result_var = #ident(#input_state_var, #arg_vals);
                        #result_type_check

                        // Reuse the existing allocation when there is one;
                        // only allocate for a first non-None state.
                        #input_var = match (#input_var, state) {
                            (None, None) => None,
                            (None, state @ Some(..)) => {
                                Some(state.into())
                            },
                            (Some(mut inner), state) => {
                                *inner = state;
                                Some(inner)
                            },
                        };
                        #input_var.internal()
                    })
                }
            }

            pub fn #ident(#(#inner_arg_signatures),*) #ret
                #body
        }
    }

    /// Generates the aggregate's final function: a check that the inner
    /// function's first argument is `Option<&mut State>`, plus an outer
    /// `#[pg_extern]` wrapper that converts the `Internal` state pointer
    /// into that reference before delegating to the user's function.
    fn final_fn_tokens(
        &self,
        schema: &Option<syn::Ident>,
        aggregate_name: &syn::Ident,
    ) -> TokenStream2 {
        let outer_ident = self.outer_ident(aggregate_name);
        let Self {
            ident,
            args,
            ret,
            body,
            ..
        } = self;

        let schema = schema.as_ref().map(|s| {
            let s = format!("{s}");
            quote!(, schema = #s)
        });

        let input_ty = &*args[0].rust.ty;

        let state_type_check = type_check_tokens(input_ty, parse_quote!(Option<&mut State>));

        let arg_vals: Punctuated<syn::Pat, Comma> = args.iter().skip(1).map(arg_ident).collect();

        let inner_arg_signatures = args.iter().map(|arg| &arg.rust);

        // use different variables for these to ensure the type-check is called
        let input_var = syn::Ident::new("input", input_ty.span());
        let state_var = syn::Ident::new("state", input_ty.span());
        let input_type_check = quote_spanned!(input_ty.span()=>
            let #state_var: #input_ty = #input_var;
        );

        quote! {
            #state_type_check

            #[pgrx::pg_extern(immutable, parallel_safe #schema)]
            pub fn #outer_ident(
                __internal: pgrx::Internal,
                __fcinfo: pg_sys::FunctionCallInfo
            ) #ret {
                use crate::palloc::InternalAsValue;
                unsafe {
                    let mut #input_var: Option<Inner<Option<State>>> = __internal.to_inner();
                    let #input_var: Option<&mut State> = #input_var.as_deref_mut()
                        .map(|i| i.as_mut()).flatten();
                    #input_type_check
                    #ident(#state_var, #arg_vals)
                }
            }

            pub fn #ident(#(#inner_arg_signatures,)*) #ret
                #body
        }
    }

    /// Generates the aggregate's serialize function: checks that the inner
    /// function takes `&State`/`&mut State` and returns `bytea`, plus a
    /// strict outer wrapper that unwraps the (non-null) `Internal` state.
    fn serialize_fn_tokens(
        &self,
        schema: &Option<syn::Ident>,
        aggregate_name: &syn::Ident,
    ) -> TokenStream2 {
        let outer_ident = self.outer_ident(aggregate_name);
        let Self {
            ident,
            args,
            ret,
            body,
            ..
        } = self;

        let schema = schema.as_ref().map(|s| {
            let s = format!("{s}");
            quote!(, schema = #s)
        });

        let input_ty = &*args[0].rust.ty;
        let state_type_check = refstate_type_check_tokens(input_ty, None);

        let return_type_check = bytea_type_check_tokens(&ret_type(ret));

        let inner_arg_signatures = args.iter().map(|arg| &arg.rust);

        // use different variables for these to ensure the type-check is called
        let input_var = syn::Ident::new("input", input_ty.span());
        let state_var = syn::Ident::new("state", input_ty.span());
        let input_type_check = quote_spanned!(input_ty.span()=>
            let #state_var: #input_ty = #input_var;
        );

        quote! {
            #state_type_check

            #return_type_check

            #[pgrx::pg_extern(strict, immutable, parallel_safe #schema)]
            pub fn #outer_ident(
                __internal: pgrx::Internal,
            ) -> bytea {
                use crate::palloc::{Inner, InternalAsValue};
                // `strict` means postgres never passes NULL here, so the
                // unwraps cannot fail.
                let #input_var: Option<Inner<Option<State>>> = unsafe {
                    __internal.to_inner()
                };
                let mut #input_var: Inner<Option<State>> = #input_var.unwrap();
                let #input_var: &mut State = #input_var.as_mut().unwrap();
                #input_type_check
                #ident(#state_var)
            }

            #[allow(clippy::ptr_arg)]
            pub fn #ident(#(#inner_arg_signatures,)*)
            -> bytea
                #body
        }
    }

    /// Generates the aggregate's deserialize function: checks that the
    /// inner function takes `bytea` and returns `State`, plus a strict
    /// outer wrapper that re-wraps the result as an `Internal` state.
    fn deserialize_fn_tokens(
        &self,
        schema: &Option<syn::Ident>,
        aggregate_name: &syn::Ident,
    ) -> TokenStream2 {
        let outer_ident = self.outer_ident(aggregate_name);
        let Self {
            ident,
            args,
            ret,
            body,
            ..
        } = self;

        let schema = schema.as_ref().map(|s| {
            let s = format!("{s}");
            quote!(, schema = #s)
        });

        let state_name = arg_ident(&args[0]);

        let state_type_check = bytea_type_check_tokens(&args[0].rust.ty);

        let return_type_check = state_type_check_tokens(&ret_type(ret), None);

        // use different variables for these to ensure the type-check is called
        let result_var = syn::Ident::new("result", ret_type(ret).span());
        let state_var = syn::Ident::new("state", ret_type(ret).span());
        let result_type_check = quote_spanned!(state_var.span()=>
            let #state_var: State = #result_var;
        );

        // int8_avg_deserialize allocates in CurrentMemoryContext, so we do the same
        // https://github.com/postgres/postgres/blob/f920f7e799c587228227ec94356c760e3f3d5f2b/src/backend/utils/adt/numeric.c#L5728-L5770
        quote! {
            #state_type_check

            #return_type_check

            #[pgrx::pg_extern(strict, immutable, parallel_safe #schema)]
            pub fn #outer_ident(
                bytes: crate::raw::bytea,
                _internal: Internal
            ) -> Option<Internal> {
                use crate::palloc::ToInternal;
                let #result_var = #ident(bytes);
                #result_type_check
                let state: Inner<Option<State>> = Some(state).into();
                unsafe {
                    Some(state).internal()
                }
            }

            pub fn #ident(#state_name: crate::raw::bytea) #ret
                #body
        }
    }

    /// Generates the aggregate's combine function: checks that both inner
    /// arguments are `Option<&State>` and the return is `Option<State>`,
    /// plus an outer wrapper that runs in the aggregate memory context and
    /// (in test builds) bumps the per-aggregate combine counters.
    fn combine_fn_tokens(
        &self,
        schema: &Option<syn::Ident>,
        aggregate_name: &syn::Ident,
    ) -> TokenStream2 {
        let outer_ident = self.outer_ident(aggregate_name);
        let Self {
            ident,
            args,
            ret,
            body,
            ..
        } = self;

        let schema = schema.as_ref().map(|s| {
            let s = format!("{s}");
            quote!(, schema = #s)
        });

        let a_name = arg_ident(&args[0]);
        let b_name = arg_ident(&args[1]);

        let state_type_check_a = refstate_type_check_tokens(&args[0].rust.ty, Some(()));
        let state_type_check_b = refstate_type_check_tokens(&args[1].rust.ty, Some(()));

        let return_type_check = state_type_check_tokens(&ret_type(ret), Some(()));
        let inner_arg_signatures = args.iter().map(|arg| &arg.rust);

        // use different variables for these to ensure the type-check is called
        let result_var = syn::Ident::new("result", ret_type(ret).span());
        let state_var = syn::Ident::new("state", ret_type(ret).span());
        let result_type_check = quote_spanned!(state_var.span()=>
            let #state_var: Option<State> = #result_var;
        );

        let mod_counters = make_mod_counters();

        quote! {
            #state_type_check_a
            #state_type_check_b
            #return_type_check
            #mod_counters

            #[pgrx::pg_extern(immutable, parallel_safe #schema)]
            pub fn #outer_ident(
                #a_name: Internal,
                #b_name: Internal,
                __fcinfo: pg_sys::FunctionCallInfo
            ) -> Option<Internal> {
                use crate::palloc::{Inner, InternalAsValue, ToInternal};
                unsafe {
                    crate::aggregate_utils::in_aggregate_context(__fcinfo, || {
                        let a: Option<Inner<State>> = #a_name.to_inner();
                        let b: Option<Inner<State>> = #b_name.to_inner();
                        // Counters exist only in test builds; see make_mod_counters.
                        #[cfg(any(test, feature = "pg_test"))]
                        #aggregate_name::counters::increment_combine(&a, &b);
                        let #result_var = #ident(
                            a.as_deref(),
                            b.as_deref(),
                        );
                        #result_type_check
                        let state = match #state_var {
                            None => None,
                            state @ Some(..) => {
                                let state: Inner<Option<State>> = state.into();
                                Some(state)
                            },
                        };
                        state.internal()
                    })
                }
            }

            #[allow(clippy::ptr_arg)]
            pub fn #ident(#(#inner_arg_signatures,)*) #ret
                #body
        }
    }

    /// Returns the SQL-visible name of this function's generated wrapper:
    /// the `#[sql_name]` override if one was given, otherwise
    /// `{aggregate}_{fn}_fn_outer`.
    fn outer_ident(&self, aggregate_name: &syn::Ident) -> syn::Ident {
        let name = match &self.sql_name {
            Some(name) => name.value(),
            None => format!("{}_{}_fn_outer", aggregate_name, self.ident),
        };
        syn::Ident::new(&name, Span::call_site())
    }

    /// Iterates the (optional Rust name, SQL type) pairs for every argument
    /// after the state argument; panics if an argument is missing its
    /// `#[sql_type(...)]` attribute.
    fn sql_args(&self) -> impl Iterator<Item = (Option<&syn::Ident>, String)> {
        self.args.iter().skip(1).map(|arg| {
            let ident = match &*arg.rust.pat {
                syn::Pat::Ident(id) => Some(&id.ident),
                _ => None,
            };
            (ident, arg.sql.as_ref().expect("missing sql arg").value())
        })
    }
}

/// Clones the argument's pattern (usually its identifier) out of the
/// boxed `Pat` so it can be spliced into generated code.
fn arg_ident(arg: &AggregateArg) -> syn::Pat {
    (*arg.rust.pat).clone()
}

/// Generates the test-only `counters` module: atomic counters recording how
/// the combine function was called (neither / left-only / right-only / both
/// states present), so pg_tests can assert on combine behavior.
fn make_mod_counters() -> TokenStream2 {
    quote! {
        #[cfg(any(test, feature = "pg_test"))]
        pub mod counters {
            use ::std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
            use crate::palloc::Inner;

            pub static COMBINE_NONE: AtomicUsize = AtomicUsize::new(0);
            pub static COMBINE_A: AtomicUsize = AtomicUsize::new(0);
            pub static COMBINE_B: AtomicUsize = AtomicUsize::new(0);
            pub static COMBINE_BOTH: AtomicUsize = AtomicUsize::new(0);

            // Works as long as only one pg_test is run at a time.  If we have two
            // running in the same process, need a mutex to ensure only one test is
            // using the counters at a time.  Otherwise, a test may see non-zero
            // counters because of another test's work rather than its own.
            pub fn reset() {
                COMBINE_NONE.store(0, Relaxed);
                COMBINE_A.store(0, Relaxed);
                COMBINE_B.store(0, Relaxed);
                COMBINE_BOTH.store(0, Relaxed);
            }

            pub fn increment_combine<T>(a: &Option<Inner<T>>, b: &Option<Inner<T>>) {
                match (a, b) {
                    // TODO Remove COMBINE_NONE?  We suspect postgres never calls with (None, None); what would be the point?
                    (None, None) => COMBINE_NONE.fetch_add(1, Relaxed),
                    // TODO Remove COMBIINE_A?  We suspect postgres never calls with (Some, None), only (None, Some).
                    (Some(_), None) => COMBINE_A.fetch_add(1, Relaxed),
                    (None, Some(_)) => COMBINE_B.fetch_add(1, Relaxed),
                    (Some(_), Some(_)) => COMBINE_BOTH.fetch_add(1, Relaxed),
                };
            }
        }
    }
}

/// Returns the function's return type, substituting the unit type `()`
/// when the signature has no explicit `-> ...` clause.
fn ret_type(ret: &syn::ReturnType) -> Cow<'_, syn::Type> {
    if let syn::ReturnType::Type(_, ty) = ret {
        Cow::Borrowed(ty)
    } else {
        Cow::Owned(parse_quote!(()))
    }
}

/// Emits a compile-time check that `ty` is `Option<State>` when `optional`
/// is set, or exactly `State` otherwise.
fn state_type_check_tokens(ty: &syn::Type, optional: Option<()>) -> TokenStream2 {
    let expected: syn::Type = if optional.is_some() {
        parse_quote!(Option<State>)
    } else {
        parse_quote!(State)
    };
    type_check_tokens(ty, expected)
}

/// Emits a compile-time check for reference-to-state arguments:
/// `Option<&State>` when `optional` is set, otherwise any reference to
/// `State` — both `&State` and `&mut State` are accepted.
fn refstate_type_check_tokens(ty: &syn::Type, optional: Option<()>) -> TokenStream2 {
    match optional {
        Some(..) => type_check_tokens(ty, parse_quote!(Option<&State>)),
        None => {
            // we need to allow both &State and &mut State, so we use a
            // different equality-checker for this case than the others
            quote_spanned! {ty.span()=>
                const _: () = {
                    // `RefType` maps either `&T` or `&mut T` to `T`, so the
                    // bound `Referenced = State` admits both reference kinds.
                    trait RefType {
                        type Referenced;
                    }
                    impl<'a, T> RefType for &'a T { type Referenced = T; }
                    impl<'a, T> RefType for &'a mut T { type Referenced = T; }
                    fn check<T: RefType<Referenced=State>>() {}
                    let _checked = check::<#ty>;
                };
            }
        }
    }
}

/// Emits a compile-time check that `ty` is exactly `bytea`.
fn bytea_type_check_tokens(ty: &syn::Type) -> TokenStream2 {
    let expected: syn::Type = parse_quote!(bytea);
    type_check_tokens(ty, expected)
}

/// Emits a `const` block that compiles only if `user_ty` and
/// `expected_type` are the same type.
///
/// The blanket impl fixes `This = Self`, so `check_type::<Expected, User>`
/// type-checks exactly when the two types are identical.  The tokens are
/// spanned to the user's type so a mismatch points at their code.
fn type_check_tokens(user_ty: &syn::Type, expected_type: syn::Type) -> TokenStream2 {
    quote_spanned! {user_ty.span()=>
        const _: () = {
            trait SameType {
                type This;
            }
            impl<T> SameType for T { type This = Self; }
            fn check_type<T, U: SameType<This=T>>() {}
            let _checked = check_type::<#expected_type, #user_ty>;
        };
    }
}


================================================
FILE: crates/asap/Cargo.toml
================================================
[package]
name = "asap"
version = "0.1.0"
edition = "2021"

[dependencies]


================================================
FILE: crates/asap/src/fft.rs
================================================
// based on https://github.com/stanford-futuredata/ASAP/blob/8b39db4bc92590cbe5b44ddace9b7bb1d677248b/ASAP-optimized.js
// original copyright notice as follows
//
// Free FFT and convolution (JavaScript)
//
// Copyright (c) 2014 Project Nayuki
// https://www.nayuki.io/page/free-small-fft-in-multiple-languages
//
// (MIT License)
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// - The above copyright notice and this permission notice shall be included in
//   all copies or substantial portions of the Software.
// - The Software is provided "as is", without warranty of any kind, express or
//   implied, including but not limited to the warranties of merchantability,
//   fitness for a particular purpose and noninfringement. In no event shall the
//   authors or copyright holders be liable for any claim, damages or other
//   liability, whether in an action of contract, tort or otherwise, arising from,
//   out of or in connection with the Software or the use or other dealings in the
//   Software.

// TODO JOSH it looks like they have a rust version as well,
//           we likely should be using that instead

use std::f64::consts::PI;

/*
 * Computes the discrete Fourier transform (DFT) of the given complex vector, storing the result back into the vector.
 * The vector can have any length. This is a wrapper function.
 */
pub fn transform(real: &mut [f64], imag: &mut [f64]) {
    assert_eq!(real.len(), imag.len());

    let len = real.len();
    if len == 0 {
        // Nothing to transform.
        return;
    }
    if len.is_power_of_two() {
        // Fast path: Cooley-Tukey radix-2.
        transform_radix2(real, imag);
    } else {
        // More complicated algorithm for arbitrary sizes
        transform_bluestein(real, imag);
    }
}

/*
 * Computes the inverse discrete Fourier transform (IDFT) of the given complex vector, storing the result back into the vector.
 * The vector can have any length. This is a wrapper function. This transform does not perform scaling, so the inverse is not a true inverse.
 */
pub fn inverse_transform(real: &mut [f64], imag: &mut [f64]) {
    // Standard trick: swapping the real and imaginary arguments of a
    // forward DFT yields the (unscaled) inverse DFT.
    transform(imag, real);
}

/*
 * Computes the discrete Fourier transform (DFT) of the given complex vector, storing the result back into the vector.
 * The vector's length must be a power of 2. Uses the Cooley-Tukey decimation-in-time radix-2 algorithm.
 */
fn transform_radix2(real: &mut [f64], imag: &mut [f64]) {
    // Initialization
    let n = real.len();
    if n == 1 {
        // Trivial transform
        return;
    }
    // The caller guarantees n is a power of 2, so the number of butterfly
    // levels is exactly log2(n) == n.trailing_zeros().  (The previous
    // sentinel-based scan of `1 << i` for i in 0..32 never terminated the
    // search for n >= 2^32 on 64-bit targets, tripping its debug_assert
    // even for valid powers of two.)
    debug_assert!(n.is_power_of_two());
    let levels: u32 = n.trailing_zeros(); // Equal to log2(n)

    // Precompute the twiddle-factor tables for e^(-2*pi*i*k/n).
    let mut cos_table = vec![0.0; n / 2];
    let mut sin_table = vec![0.0; n / 2];
    for i in 0..n / 2 {
        cos_table[i] = (2.0 * PI * i as f64 / n as f64).cos();
        sin_table[i] = (2.0 * PI * i as f64 / n as f64).sin();
    }

    // Bit-reversed addressing permutation
    for i in 0..n {
        let j = reverse_bits(i as u32, levels) as usize;
        // Swap each pair once (j > i), leave fixed points alone.
        if j > i {
            real.swap(i, j);
            imag.swap(i, j);
        }
    }

    // Cooley-Tukey decimation-in-time radix-2 FFT
    let mut size = 2;
    while size <= n {
        let halfsize = size / 2;
        let tablestep = n / size;
        for i in (0..n).step_by(size) {
            let mut j = i;
            let mut k = 0;
            while j < i + halfsize {
                // Butterfly: t = x[j+half] * w; x[j+half] = x[j] - t; x[j] += t
                let tpre = real[j + halfsize] * cos_table[k] + imag[j + halfsize] * sin_table[k];
                let tpim = -real[j + halfsize] * sin_table[k] + imag[j + halfsize] * cos_table[k];
                real[j + halfsize] = real[j] - tpre;
                imag[j + halfsize] = imag[j] - tpim;
                real[j] += tpre;
                imag[j] += tpim;
                j += 1;
                k += tablestep;
            }
        }
        size *= 2;
    }

    // Returns the integer whose value is the reverse of the lowest 'bits' bits of the integer 'x'.
    fn reverse_bits(x: u32, bits: u32) -> u32 {
        let mut x = x;
        let mut y = 0;
        for _ in 0..bits {
            y = (y << 1) | (x & 1);
            x >>= 1;
        }
        y
    }
}

/*
 * Computes the discrete Fourier transform (DFT) of the given complex vector, storing the result back into the vector.
 * The vector can have any length. This requires the convolution function, which in turn requires the radix-2 FFT function.
 * Uses Bluestein's chirp z-transform algorithm.
 */
fn transform_bluestein(real: &mut [f64], imag: &mut [f64]) {
    // Find a power-of-2 convolution length m such that m >= n * 2 + 1
    let n = real.len();
    let mut m = 1;
    while m < n * 2 + 1 {
        m *= 2;
    }

    // Trignometric tables
    // Chirp tables: cos/sin of pi * (i^2 mod 2n) / n.
    let mut cos_table = vec![0.0; n];
    let mut sin_table = vec![0.0; n];
    for i in 0..n {
        let j = (i * i % (n * 2)) as f64; // This is more accurate than j = i * i
        cos_table[i] = (PI * j / n as f64).cos();
        sin_table[i] = (PI * j / n as f64).sin();
    }

    // Temporary vectors and preprocessing
    // a = input premultiplied by the conjugate chirp, zero-padded to m.
    let mut areal = vec![0.0; m];
    let mut aimag = vec![0.0; m];
    for i in 0..n {
        areal[i] = real[i] * cos_table[i] + imag[i] * sin_table[i];
        aimag[i] = -real[i] * sin_table[i] + imag[i] * cos_table[i];
    }
    for i in n..m {
        areal[i] = 0.0;
        aimag[i] = 0.0;
    }

    // b = the chirp filter, laid out symmetrically (b[m - i] = b[i]) with
    // the middle region zeroed so the circular convolution is valid.
    let mut breal = vec![0.0; m];
    let mut bimag = vec![0.0; m];
    breal[0] = cos_table[0];
    bimag[0] = sin_table[0];
    for i in 1..n {
        breal[i] = cos_table[i];
        breal[m - i] = cos_table[i];
        bimag[i] = sin_table[i];
        bimag[m - i] = sin_table[i];
    }
    for i in n..=(m - n) {
        breal[i] = 0.0;
        bimag[i] = 0.0;
    }

    // Convolution
    let mut creal = vec![0.0; m];
    let mut cimag = vec![0.0; m];
    convolve_complex(
        &mut areal, &mut aimag, &mut breal, &mut bimag, &mut creal, &mut cimag,
    );

    // Postprocessing
    // Multiply the first n outputs by the conjugate chirp again to recover
    // the DFT of the original input.
    for i in 0..n {
        real[i] = creal[i] * cos_table[i] + cimag[i] * sin_table[i];
        imag[i] = -creal[i] * sin_table[i] + cimag[i] * cos_table[i];
    }
}

// /*
//  * Computes the circular convolution of the given real vectors. Each vector's length must be the same.
//  */
// function convolveReal(x, y, out) {
//     if (x.length != y.length || x.length != out.length)
//         throw "Mismatched lengths";
//     var zeros = new Array(x.length);
//     for (var i = 0; i < zeros.length; i++)
//         zeros[i] = 0;
//     convolve_complex(x, zeros, y, zeros.slice(), out, zeros.slice());
// }

// /*
//  * Computes the circular convolution of the given complex vectors. Each vector's length must be the same.
//  */
/// Computes the circular convolution of two complex vectors (given as
/// separate real/imag slices) into `outreal`/`outimag`.  The inputs are
/// overwritten in the process.  All slices must have the same length.
fn convolve_complex(
    xreal: &mut [f64],
    ximag: &mut [f64],
    yreal: &mut [f64],
    yimag: &mut [f64],
    outreal: &mut [f64],
    outimag: &mut [f64],
) {
    let len = xreal.len();

    // Convolution theorem: transform both inputs, multiply pointwise,
    // then transform back.
    transform(xreal, ximag);
    transform(yreal, yimag);
    for i in 0..len {
        let product_re = xreal[i] * yreal[i] - ximag[i] * yimag[i];
        let product_im = ximag[i] * yreal[i] + xreal[i] * yimag[i];
        xreal[i] = product_re;
        ximag[i] = product_im;
    }
    inverse_transform(xreal, ximag);

    // Scaling (because this FFT implementation omits it)
    let scale = len as f64;
    for i in 0..len {
        outreal[i] = xreal[i] / scale;
        outimag[i] = ximag[i] / scale;
    }
}


================================================
FILE: crates/asap/src/lib.rs
================================================
// based on https://github.com/stanford-futuredata/ASAP/blob/8b39db4bc92590cbe5b44ddace9b7bb1d677248b/ASAP-optimized.js
// original copyright notice as follows
//
// Free FFT and convolution (JavaScript)
//
// Copyright (c) 2014 Project Nayuki
// https://www.nayuki.io/page/free-small-fft-in-multiple-languages
//
// (MIT License)
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// - The above copyright notice and this permission notice shall be included in
//   all copies or substantial portions of the Software.
// - The Software is provided "as is", without warranty of any kind, express or
//   implied, including but not limited to the warranties of merchantability,
//   fitness for a particular purpose and noninfringement. In no event shall the
//   authors or copyright holders be liable for any claim, damages or other
//   liability, whether in an action of contract, tort or otherwise, arising from,
//   out of or in connection with the Software or the use or other dealings in the
//   Software.

mod fft;

/// Smooth out the data to promote human readability; `resolution` is an upper
/// bound on the number of points returned.
///
/// The search picks the largest moving-average window that keeps the series'
/// kurtosis at least as high as the original's (i.e. preserves outliers)
/// while minimizing "roughness" (std-dev of the first differences).
pub fn asap_smooth(data: &[f64], resolution: u32) -> Vec<f64> {
    use std::borrow::Cow;

    // If the input is more than twice the target resolution, pre-aggregate
    // with a non-overlapping moving average (window == slide) so the search
    // below runs on roughly `resolution` points.
    let data = if data.len() > 2 * resolution as usize {
        let period = (data.len() as f64 / resolution as f64) as u32;
        Cow::Owned(sma(data, period, period))
    } else {
        Cow::Borrowed(data)
    };

    // Autocorrelation up to a max lag of 10% of the series length; its peaks
    // are the candidate smoothing windows.
    let mut acf = Acf::new(&data, (data.len() as f64 / 10.0).round() as u32);
    let peaks = acf.find_peaks();
    let mut metrics = Metrics::new(&data);
    let original_kurt = metrics.kurtosis();
    let mut min_obj = metrics.roughness();
    let mut window_size = 1_u32;
    let mut lb = 1;
    let mut largest_feasible = -1_i32;
    let mut tail = data.len() as u32 / 10;

    // Evaluate candidate windows from the largest ACF peak downward.
    for i in (0..peaks.len()).rev() {
        let w = peaks[i];
        if w < lb || w == 1 {
            break;
        } else if (1.0 - acf.correlations[w as usize]).sqrt() * window_size as f64
            > (1.0 - acf.correlations[window_size as usize]).sqrt() * w as f64
        {
            // Pruning: this candidate cannot beat the current best, so skip
            // the (expensive) smoothing pass.
            continue;
        }
        let smoothed = sma(&data, w, 1);
        metrics = Metrics::new(&smoothed);
        if metrics.kurtosis() >= original_kurt {
            let roughness = metrics.roughness();
            if roughness < min_obj {
                min_obj = roughness;
                window_size = w;
            }
            // Tighten the lower bound on windows still worth considering.
            // NOTE(review): `(acf.max_acf - 1.0).sqrt()` is NaN whenever
            // max_acf < 1.0, which makes this bound a no-op (NaN > lb is
            // false) — confirm against the reference implementation whether
            // the sqrt should be taken over the whole ratio instead.
            let test_lb =
                w as f64 * (acf.max_acf - 1.0).sqrt() / (acf.correlations[w as usize] - 1.0);
            if test_lb > lb as f64 {
                lb = test_lb.round() as u32;
            }
            if largest_feasible < 0 {
                largest_feasible = i as i32;
            }
        }
    }

    // Narrow the binary-search range around the largest feasible peak.
    // NOTE(review): `> 0` skips the case where peak index 0 was the largest
    // feasible one — verify whether this should be `>= 0`.
    if largest_feasible > 0 {
        if largest_feasible < (peaks.len() - 2) as i32 {
            tail = peaks[(largest_feasible + 1) as usize];
        }
        if peaks[largest_feasible as usize] + 1 > lb {
            lb = peaks[largest_feasible as usize] + 1;
        }
    }

    // Refine the window within [lb, tail], then apply the final smoothing.
    window_size = binary_search(lb, tail, &data, min_obj, original_kurt, window_size);
    sma(&data, window_size, 1)
}

/// Binary search over candidate smoothing windows in `[head, tail]`.
///
/// A window is "feasible" when the smoothed series keeps at least the
/// original kurtosis; among feasible windows we keep the one with the lowest
/// roughness, preferring larger windows (the search continues upward on
/// success).
fn binary_search(
    head: u32,
    tail: u32,
    data: &[f64],
    min_obj: f64,
    original_kurt: f64,
    window_size: u32,
) -> u32 {
    let mut lo = head;
    let mut hi = tail;
    let mut best_obj = min_obj;
    let mut best_window = window_size;
    while lo <= hi {
        let mid = (lo + hi).div_ceil(2);
        let smoothed = sma(data, mid, 1);
        let stats = Metrics::new(&smoothed);
        if stats.kurtosis() >= original_kurt {
            // Feasible: remember it if it is smoother, then try larger windows.
            let obj = stats.roughness();
            if obj < best_obj {
                best_window = mid;
                best_obj = obj;
            }
            lo = mid + 1;
        } else {
            // Infeasible: only smaller windows can work.
            hi = mid - 1;
        }
    }
    best_window
}

/// Simple moving average: emits the mean of each full `range`-sized window,
/// advancing the window start by `slide` elements after every emission.
fn sma(data: &[f64], range: u32, slide: u32) -> Vec<f64> {
    let (range, slide) = (range as usize, slide as usize);
    let mut averages = Vec::new();
    let mut start = 0_usize;
    let mut running_sum = 0.0;
    let mut in_window = 0_usize;

    for (idx, &sample) in data.iter().enumerate() {
        running_sum += sample;
        in_window += 1;
        if idx + 1 - start >= range {
            averages.push(running_sum / in_window as f64);
            // Drop the leading `slide` elements (clamped to the data length)
            // so the next window starts `slide` further along.
            let target = (start + slide).min(data.len());
            while start < target {
                running_sum -= data[start];
                in_window -= 1;
                start += 1;
            }
        }
    }

    averages
}

/// Arithmetic mean of `values`. An empty slice yields NaN (0.0 / 0.0).
fn mean(values: &[f64]) -> f64 {
    let total: f64 = values.iter().sum();
    total / values.len() as f64
}

/// Population standard deviation of `values` (divides by N, not N - 1).
fn std(values: &[f64]) -> f64 {
    let m = values.iter().sum::<f64>() / values.len() as f64;
    let sum_sq_dev: f64 = values.iter().map(|&x| (x - m).powi(2)).sum();
    (sum_sq_dev / values.len() as f64).sqrt()
}

impl<'a> Acf<'a> {
    /// Builds the autocorrelation function of `values` for lags `0..max_lag`.
    fn new(values: &'a [f64], max_lag: u32) -> Acf<'a> {
        let mut acf = Acf {
            mean: mean(values),
            values,
            // BUG FIX: this was `Vec::with_capacity(max_lag as usize)`, which
            // produces an *empty* vector. `calculate()` only writes through
            // indexing over `1..correlations.len()` (== 0), so the
            // correlations were never populated and `find_peaks()` always saw
            // an empty slice. Allocate the full length up front, mirroring
            // `new Array(maxLag)` in the reference JS implementation.
            correlations: vec![0.0; max_lag as usize],
            max_acf: 0.0,
        };
        acf.calculate();
        acf
    }

    /// Fills `self.correlations[1..]` with the autocorrelation at each lag via
    /// FFT (Wiener–Khinchin: R = IFFT(|FFT(x - mean)|²)), normalized by the
    /// zero-lag term. `correlations[0]` is left at 0.0.
    fn calculate(&mut self) {
        /* Padding to the closest power of 2 */
        let len = (2_u32).pow((self.values.len() as f64).log2() as u32 + 1);
        let mut fftreal = vec![0.0; len as usize];
        let mut fftimg = vec![0.0; len as usize];

        // Mean-center the input; the padding beyond `values.len()` stays 0.
        for (i, real) in fftreal.iter_mut().enumerate().take(self.values.len()) {
            *real = self.values[i] - self.mean;
        }

        /* F_R(f) = FFT(X) */
        fft::transform(&mut fftreal, &mut fftimg);

        /* S(f) = F_R(f)F_R*(f) */
        for i in 0..fftreal.len() {
            fftreal[i] = fftreal[i].powi(2) + fftimg[i].powi(2);
            fftimg[i] = 0.0;
        }

        /*  R(t) = IFFT(S(f)) */
        fft::inverse_transform(&mut fftreal, &mut fftimg);
        // NOTE(review): if the input has zero variance, fftreal[0] is 0 and
        // these divisions yield NaN — confirm callers never pass constant data.
        for i in 1..self.correlations.len() {
            self.correlations[i] = fftreal[i] / fftreal[0];
        }
    }

    /// Returns the lags of local maxima of the autocorrelation whose value
    /// exceeds `CORR_THRESH`, recording the largest peak value in
    /// `self.max_acf`. Falls back to every lag >= 2 when at most one peak is
    /// found.
    fn find_peaks(&mut self) -> Vec<u32> {
        const CORR_THRESH: f64 = 0.2;

        let mut peak_indices = Vec::new();

        if self.correlations.len() > 1 {
            // `positive` tracks whether we are currently on a rising slope.
            let mut positive = self.correlations[1] > self.correlations[0];
            let mut max = 1;
            for i in 2..self.correlations.len() {
                if !positive && self.correlations[i] > self.correlations[i - 1] {
                    // Trough ended; start climbing toward a new candidate peak.
                    max = i;
                    positive = !positive;
                } else if positive && self.correlations[i] > self.correlations[max] {
                    max = i;
                } else if positive
                    && self.correlations[i] < self.correlations[i - 1]
                    && max > 1
                    && self.correlations[max] > CORR_THRESH
                {
                    // Slope turned downward: `max` was a genuine peak.
                    peak_indices.push(max as u32);
                    if self.correlations[max] > self.max_acf {
                        self.max_acf = self.correlations[max];
                    }
                    positive = !positive;
                }
            }
        }

        /* If there is no autocorrelation peak within the MAX_WINDOW boundary,
        # try windows from the largest to the smallest */

        if peak_indices.len() <= 1 {
            for i in 2..self.correlations.len() {
                peak_indices.push(i as u32);
            }
        }

        peak_indices
    }
}

// Cached per-series statistics used to score candidate smoothing windows.
struct Metrics<'a> {
    // Number of samples in `values`, cached as u32.
    len: u32,
    // The series under measurement.
    values: &'a [f64],
    // Arithmetic mean of `values`, computed once at construction.
    m: f64,
}

impl Metrics<'_> {
    /// Caches the length and mean of `values` for the metric computations.
    fn new(values: &[f64]) -> Metrics<'_> {
        Metrics {
            len: values.len() as u32,
            values,
            m: mean(values),
        }
    }

    /// Kurtosis: n · Σ(x−m)⁴ / (Σ(x−m)²)².
    ///
    /// Returns NaN for constant input (zero sum of squared deviations).
    fn kurtosis(&self) -> f64 {
        let mut u4 = 0.0;
        // Despite the name, this accumulates the *sum* of squared deviations,
        // not the variance; the formula above divides by its square.
        let mut variance = 0.0;

        for value in self.values {
            u4 += (value - self.m).powi(4);
            variance += (value - self.m).powi(2);
        }

        self.len as f64 * u4 / variance.powi(2)
    }

    /// Roughness: standard deviation of the first differences of the series.
    fn roughness(&self) -> f64 {
        std(&self.diffs())
    }

    /// First differences of the series: diff[i] = values[i+1] - values[i].
    ///
    /// Fix: the previous implementation allocated `vec![0.0; (self.len - 1)]`,
    /// which underflows u32 (panicking in debug builds) for an empty series.
    /// `windows(2)` yields an empty vector for 0- or 1-element input and the
    /// identical result otherwise.
    fn diffs(&self) -> Vec<f64> {
        self.values.windows(2).map(|w| w[1] - w[0]).collect()
    }
}

// Autocorrelation function of a series, computed once at construction
// (see `Acf::new` / `Acf::calculate`).
struct Acf<'a> {
    // Arithmetic mean of `values`.
    mean: f64,
    // The input series.
    values: &'a [f64],
    // Autocorrelation per lag, populated by `calculate` (lag 0 is not written).
    correlations: Vec<f64>,
    // Largest correlation value among peaks found by `find_peaks`.
    max_acf: f64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn simple_sma_test() {
        // Window of 3 sliding by 1: means of [0,1,2], [1,2,3], [2,3,4].
        let samples = [0.0, 1.0, 2.0, 3.0, 4.0];
        assert_eq!(sma(&samples, 3, 1), vec![1.0, 2.0, 3.0]);
    }

    #[test]
    fn sma_slide_test() {
        // Window of 3 sliding by 2: means of [0,1,2], [2,3,4], [4,5,6].
        let samples = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
        assert_eq!(sma(&samples, 3, 2), vec![1.0, 3.0, 5.0]);
    }

    #[test]
    fn sma_slide_unaliged_test() {
        // (Name typo preserved: "unaligned".) The trailing element 7.0 never
        // fills a complete window, so the output matches the 7-element case.
        let samples = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
        assert_eq!(sma(&samples, 3, 2), vec![1.0, 3.0, 5.0]);
    }

    #[test]
    fn sma_downsample_test() {
        // range == slide: non-overlapping windows downsample pairwise.
        let samples = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
        assert_eq!(sma(&samples, 2, 2), vec![0.5, 2.5, 4.5, 6.5]);
    }

    #[test]
    fn test_roughness_and_kurtosis() {
        // Three series, each constructed to have mean 0 and population
        // standard deviation 1, but with very different shapes.
        let series_a = vec![-1.0, 1.0, -1.0, 1.0, -1.0, 1.0]; // bimodal
        let x = (3.0 - ((9.0 + 8.0 * 1.82) as f64).sqrt()) / 4.0; // ~ -0.45, calculated for specific mean and std
        let series_b = vec![-1.0, -0.8, x, -0.2, 0.5, 1.5 - x]; // uneven but monotonically increasing
        let x = ((1.0 / 2.0) as f64).sqrt();
        let series_c = vec![-2.0 * x, -1.0 * x, 0.0, x, 2.0 * x]; // linear

        // Sanity-check the construction before measuring.
        assert_eq!(mean(&series_a), 0.0);
        assert_eq!(std(&series_a), 1.0);
        assert_eq!(mean(&series_b), 0.0);
        assert_eq!(std(&series_b), 1.0);
        assert!(mean(&series_c).abs() < 0.000000000001); // float precision breaks == 0 here
        assert_eq!(std(&series_c), 1.0);

        // The alternating series is rough (diffs of ±2 minus their mean).
        let test = Metrics::new(&series_a);
        assert_eq!(
            test.roughness(),
            ((3.0 * 1.6_f64.powi(2) + 2.0 * 2.4_f64.powi(2)) / 5.0).sqrt()
        );
        assert_eq!(test.kurtosis(), 1.0);
        let test = Metrics::new(&series_b);
        assert_eq!(test.roughness(), 0.4686099077599554); // manually verified
        assert!((test.kurtosis() - 2.7304).abs() < 0.000000000001); // = 2.7304
        // A perfectly linear series has constant diffs, hence zero roughness.
        let test = Metrics::new(&series_c);
        assert_eq!(test.roughness(), 0.0);
        assert!((test.kurtosis() - 1.7).abs() < 0.000000000001); // == 1.7
    }

    #[test]
    fn test_smoothing() {
        // Monthly English temperature data from 1723 through 1970 (~3k pts)
        #[rustfmt::skip]
	let data = vec![1.1,4.4,7.5,8.9,11.7,15.0,15.3,15.6,13.3,11.1,7.5,5.8,5.6,4.2,4.7,7.2,11.4,15.3,15.0,16.2,14.4,8.6,5.3,3.3,4.4,3.3,5.0,8.1,10.8,12.2,13.8,13.3,12.8,9.4,6.9,3.9,1.1,4.2,4.2,8.4,13.4,16.4,16.0,15.6,14.7,10.2,6.1,1.8,4.2,5.0,5.1,9.2,13.6,14.9,16.9,16.9,14.4,10.8,4.7,3.6,3.9,2.4,7.1,8.3,12.5,16.4,16.9,16.0,12.8,9.1,7.2,1.6,1.2,2.3,2.8,7.1,10.3,15.1,16.8,15.7,16.6,10.1,8.1,5.0,4.1,4.7,6.2,8.7,12.4,14.0,15.3,16.3,15.3,10.9,9.2,3.4,1.9,2.2,6.0,6.8,12.1,15.6,16.3,16.7,15.3,12.3,7.8,5.2,2.4,6.4,6.1,8.9,11.4,14.6,16.0,16.6,14.5,10.9,6.3,2.2,6.9,6.0,5.9,10.0,11.2,15.2,18.3,16.1,12.8,9.1,6.5,7.6,4.3,6.4,8.1,9.3,11.1,14.1,16.2,16.2,13.3,8.4,6.2,4.0,4.4,4.0,5.8,8.9,10.9,13.3,14.8,16.2,14.2,10.3,6.3,5.4,6.4,3.1,6.9,8.6,10.6,15.7,16.4,17.8,14.4,10.4,6.9,6.4,6.2,4.2,6.1,8.8,12.5,15.9,17.4,13.8,14.2,8.9,6.1,4.9,4.6,4.6,5.5,9.9,11.4,14.2,16.4,16.0,12.5,10.2,6.3,6.1,4.0,6.8,5.8,6.7,11.6,15.2,16.0,14.7,13.1,9.6,3.7,3.2,2.8,1.6,3.9,6.4,8.6,12.8,15.3,14.7,14.0,5.3,3.3,2.2,1.7,4.4,4.2,7.1,9.3,15.2,15.6,16.7,14.7,11.0,7.8,3.9,1.9,3.6,4.1,6.6,10.6,15.0,15.8,15.8,12.2,9.2,4.4,1.1,3.6,5.4,5.3,5.4,13.3,15.6,14.9,16.9,14.2,8.9,9.3,4.9,1.4,2.9,4.8,6.7,10.8,14.4,16.4,15.4,12.8,9.4,6.9,3.5,3.8,2.3,4.4,7.5,11.4,12.2,16.1,15.0,14.2,10.3,5.8,2.7,2.5,1.4,3.1,6.9,12.8,14.3,15.8,15.9,14.2,7.8,3.3,5.3,3.3,5.8,2.5,8.1,12.2,14.7,16.9,18.3,14.4,9.4,6.9,5.3,2.5,1.8,1.8,6.3,10.4,14.8,15.4,15.8,14.2,9.2,7.1,6.0,5.3,3.6,5.3,6.8,12.3,11.9,17.2,15.6,13.8,10.1,6.7,4.7,4.0,6.7,8.2,7.7,10.7,14.2,17.2,15.0,15.2,9.2,4.0,4.2,4.0,1.5,6.2,7.1,9.3,14.9,15.3,14.6,12.6,8.3,4.3,3.0,3.2,3.1,5.6,6.8,10.3,14.8,15.6,15.7,13.9,10.6,6.5,4.2,2.2,3.6,6.0,7.5,12.1,14.6,15.2,15.7,13.1,10.0,4.6,4.4,3.3,2.8,3.4,6.7,12.2,13.6,14.7,15.7,13.9,10.4,5.7,3.6,2.2,1.2,3.9,10.0,9.4,15.7,15.0,14.6,13.5,8.4,4.7,3.9,4.4,4.6,6.0,6.7,9.1,13.8,16.1,14.7,13.6,9.4,3.9,2.9,0.3,4.0,4.9,8.1,10.7,14.0,18.4,15.2,13.3,8.2,7.1,3.2,2.6,3.8,5.2,7.2,13.8,14.6,14.2,16.4,11.9,8.1,5.7,3.9,5.9,5.8,6.1,8.6,12.1,15.0,18.2,16.3,13.5,10.9,5.1,2.5,1.9,3.8,
6.6,9.4,11.7,15.2,16.9,15.8,15.7,9.2,5.7,6.1,5.4,5.8,6.8,9.4,11.9,14.3,15.8,16.4,14.2,9.4,6.2,4.4,4.7,4.0,3.7,10.0,12.9,16.9,17.8,15.3,13.6,7.9,4.6,3.6,0.8,4.9,5.4,8.9,10.2,14.6,15.3,15.3,13.1,8.3,5.8,6.2,3.7,3.8,3.9,7.2,12.2,13.9,16.1,15.2,12.5,8.9,4.4,2.8,4.8,0.4,5.0,7.5,11.4,13.8,15.7,15.3,13.3,9.2,3.9,1.7,0.7,1.7,4.2,8.1,9.7,13.7,15.7,16.6,13.3,9.3,7.2,3.3,0.1,5.4,4.7,7.3,10.0,12.8,14.4,16.1,14.1,9.2,6.9,3.3,0.8,4.8,4.7,8.1,12.2,13.9,15.6,16.0,11.7,9.2,5.6,4.6,2.5,2.7,5.0,7.8,11.3,13.1,16.4,15.0,12.8,8.2,5.7,4.8,3.7,4.6,2.5,5.4,10.0,13.1,15.3,15.8,13.9,8.9,5.3,3.6,1.0,3.2,3.1,5.5,12.2,14.3,15.7,14.3,12.2,9.2,6.3,5.6,1.2,1.9,4.4,6.4,10.1,16.1,16.9,16.1,13.0,11.7,7.2,4.8,4.0,2.6,6.5,8.3,10.3,14.7,15.9,17.2,12.4,9.9,5.3,3.8,0.6,4.3,6.4,8.6,10.9,14.7,16.1,16.1,12.5,10.3,4.8,3.5,4.6,6.1,6.0,9.8,12.6,16.6,16.7,15.8,14.3,9.3,4.8,4.5,1.6,3.8,6.4,9.4,10.8,14.1,16.3,15.2,12.9,10.2,6.2,4.4,1.9,2.3,6.8,7.2,11.7,13.6,15.3,15.9,14.6,10.2,6.9,2.6,1.9,3.2,4.6,8.2,10.6,15.4,17.3,16.8,12.2,7.4,6.7,6.1,2.9,7.9,7.9,9.4,11.9,14.4,17.9,17.6,15.2,10.9,5.7,3.1,0.9,2.1,7.9,6.3,12.8,14.2,16.8,17.6,15.6,9.1,4.4,3.2,2.1,4.8,6.6,9.2,12.1,16.2,17.4,17.3,14.2,10.6,6.5,5.4,5.2,1.9,4.1,5.2,9.0,14.9,15.6,14.2,13.3,7.6,2.3,2.8,3.4,3.3,3.3,10.1,10.4,14.8,18.8,15.8,12.8,9.8,6.2,2.7,0.6,1.4,2.7,5.7,13.5,13.7,15.2,14.0,14.8,7.8,5.5,0.3,3.4,0.4,1.2,8.4,12.3,16.1,16.1,13.9,13.6,8.7,5.6,2.8,2.7,3.4,2.1,8.1,11.2,16.1,15.0,15.1,11.7,7.5,3.3,2.8,3.6,5.9,6.8,7.4,11.5,13.9,15.8,15.6,12.8,9.8,4.5,3.8,3.9,3.8,3.6,9.4,13.8,15.4,15.8,15.8,13.4,9.8,6.1,0.3,1.5,5.0,2.1,7.4,12.5,14.0,15.4,16.6,13.1,8.6,4.6,6.1,4.3,6.6,6.4,6.1,11.9,14.6,14.9,15.6,12.2,10.3,6.1,4.3,4.3,4.7,6.5,9.6,10.6,14.7,15.3,15.9,13.8,8.9,5.9,1.1,2.3,4.5,5.9,10.0,10.2,13.2,15.3,16.9,11.8,8.8,7.1,4.3,2.8,4.6,4.2,6.2,10.9,13.5,17.6,15.0,11.7,11.3,6.0,5.3,1.8,7.2,7.0,10.2,11.3,15.7,18.1,15.5,12.5,9.6,6.1,3.7,3.1,0.8,3.9,7.7,10.9,13.2,15.2,16.6,16.0,11.7,4.5,6.6,7.3,4.7,4.2,10.2,10.3,13.9,14.7,15.9,14.6,8.1,4.6,0.3,3.5,4.6,4.3,7.4,11.3,13.6,17.3,15.8
,12.5,8.2,4.7,4.8,3.6,4.0,5.1,10.4,12.9,16.9,16.3,16.4,13.6,9.9,4.7,1.5,1.7,2.8,3.4,5.4,9.6,14.1,15.2,14.4,12.9,8.3,5.6,1.3,2.8,2.2,4.0,9.3,12.3,13.9,17.7,16.8,13.9,9.2,5.4,3.3,4.6,4.8,6.7,8.3,12.1,14.8,16.1,17.1,14.2,10.2,4.8,1.5,1.6,3.7,5.6,8.9,10.6,13.7,13.5,17.2,13.8,10.1,5.1,3.6,1.8,3.4,6.3,9.1,10.5,13.7,17.6,16.1,11.4,9.3,5.0,4.4,5.8,2.9,4.7,6.9,13.3,16.1,15.9,15.6,14.2,10.7,6.6,2.1,2.1,4.1,6.2,8.3,10.2,13.2,16.0,16.4,14.6,8.2,4.8,3.6,4.2,4.3,5.1,6.8,12.1,14.9,15.4,16.2,13.4,10.6,7.8,6.8,2.8,3.7,2.9,7.7,11.8,14.2,17.1,16.9,10.5,11.4,2.9,1.8,2.6,2.8,3.2,5.8,13.7,14.8,18.4,16.7,12.7,7.2,6.0,2.2,2.0,5.7,6.0,5.2,13.1,13.7,15.1,14.8,12.7,10.2,4.6,4.1,2.2,3.5,4.9,8.2,9.2,14.6,15.2,14.6,13.9,9.8,5.4,3.6,1.2,4.6,7.1,8.9,12.8,14.1,16.1,14.4,13.7,12.3,7.7,3.1,2.6,5.3,3.5,5.5,10.9,13.0,14.2,14.3,13.2,9.3,4.9,1.7,1.9,5.8,6.8,7.6,11.6,13.6,15.0,14.5,12.5,8.1,4.3,2.8,2.9,1.4,2.9,9.6,9.2,12.2,16.0,14.7,12.8,8.1,4.7,4.3,0.3,6.5,7.3,8.1,12.6,14.3,14.9,15.3,13.4,10.3,3.4,2.3,2.7,2.1,3.9,6.6,9.9,12.8,13.4,13.9,11.8,10.3,3.9,3.1,4.5,6.4,5.5,7.6,8.7,15.1,14.1,13.6,13.2,6.4,9.1,2.5,4.4,2.7,4.5,6.9,11.3,16.4,18.2,15.3,13.3,12.0,9.5,3.6,4.4,4.3,6.8,8.6,11.5,13.4,16.4,17.4,13.4,9.1,4.1,1.4,0.3,3.2,4.7,8.9,11.4,13.6,15.7,14.7,12.3,8.1,5.6,4.7,3.6,2.1,5.7,9.5,9.4,12.3,14.8,16.4,14.9,10.4,8.6,6.4,4.7,6.3,7.8,8.3,12.7,17.1,15.6,15.2,12.4,10.7,8.2,1.6,0.1,3.1,5.0,6.7,12.2,12.3,14.1,14.4,12.5,8.4,7.1,4.8,4.3,4.7,4.6,7.4,10.7,13.4,16.0,15.1,13.7,9.5,7.2,5.1,3.8,3.9,5.0,9.1,11.6,14.1,17.2,16.3,15.1,10.8,5.2,4.6,0.4,6.4,6.3,8.8,11.2,17.3,17.9,17.6,13.6,11.1,4.4,5.8,1.7,0.7,5.9,8.9,11.9,14.2,16.5,14.8,13.7,11.4,6.9,6.9,5.1,5.2,6.6,8.3,12.4,15.4,16.0,15.3,14.3,10.2,7.4,7.4,0.3,4.3,4.3,6.7,12.5,14.9,15.1,14.3,11.3,8.3,4.5,1.4,0.2,2.2,7.7,8.9,12.0,12.7,16.2,13.7,11.9,10.4,6.9,1.9,1.6,4.8,7.2,9.2,11.5,15.4,16.7,16.9,13.7,12.7,5.6,5.8,3.1,3.4,5.8,8.6,10.9,15.2,15.9,15.4,13.6,10.7,5.9,5.1,1.2,5.6,3.9,7.7,15.1,14.6,15.8,14.3,12.1,10.1,6.6,6.9,7.1,5.6,7.1,7.7,13.0,15.4,16.9,16.2,13.8,10.6,6.7,5.6,2.9,5
.7,5.8,8.6,11.3,15.0,16.4,16.9,13.4,8.9,6.6,3.1,3.7,3.5,5.8,7.2,11.1,15.3,15.4,14.6,11.7,8.6,5.3,4.1,2.7,4.7,2.3,4.7,9.9,15.5,16.9,15.7,12.5,10.5,5.2,5.3,1.5,0.4,4.9,6.1,10.5,14.4,15.6,15.1,12.7,9.8,4.6,4.0,2.8,4.1,4.2,6.4,10.2,14.3,14.9,14.6,12.4,9.3,7.3,3.7,4.1,3.6,3.8,9.7,11.4,14.1,13.8,15.9,11.1,7.5,11.4,1.3,1.1,2.4,7.5,7.8,12.7,12.9,13.8,14.6,13.4,8.7,5.2,4.4,0.6,4.2,6.4,7.8,11.4,15.6,14.5,17.1,13.2,7.2,5.5,7.2,4.0,1.9,5.6,8.5,10.4,12.8,14.8,15.3,14.4,7.9,5.7,7.4,3.8,1.6,4.7,9.8,10.8,14.7,15.4,13.5,13.3,9.2,5.9,0.4,3.2,0.9,2.0,8.6,9.5,14.9,14.3,13.5,11.4,9.5,6.7,4.6,6.3,6.4,6.1,7.8,12.3,18.2,16.5,16.6,14.7,9.5,6.9,0.5,2.2,2.4,5.6,6.6,12.3,13.9,17.5,15.2,11.5,10.7,7.9,4.8,1.3,6.1,5.9,8.2,13.9,14.5,15.6,13.6,12.8,9.7,5.8,5.6,3.9,5.7,6.1,6.4,12.1,13.9,15.4,15.6,13.3,9.2,6.6,3.4,0.7,6.4,4.7,9.0,10.1,15.4,16.2,14.5,12.3,7.9,7.4,4.6,5.6,4.7,5.8,7.4,10.4,14.3,14.6,15.5,12.7,10.8,3.1,4.8,4.9,4.7,5.2,8.2,10.6,13.2,18.7,15.8,12.9,7.8,7.9,7.7,5.1,0.6,3.4,7.6,10.9,14.3,14.9,14.7,12.3,10.1,5.2,1.3,3.6,4.3,6.7,9.2,10.3,13.2,15.4,15.2,14.4,9.4,4.9,5.1,2.4,1.7,3.3,7.1,8.8,13.3,16.8,15.7,13.2,9.7,5.3,2.4,3.7,5.3,4.4,8.1,9.4,13.5,15.3,16.9,12.5,10.7,4.8,4.4,2.6,4.3,5.3,7.5,11.3,15.8,16.4,17.4,14.5,11.3,7.2,7.3,3.4,1.8,4.9,7.7,10.8,16.8,14.8,15.8,14.7,9.6,4.3,4.8,4.9,5.7,7.3,7.5,11.7,14.8,18.3,16.2,12.9,9.6,4.8,1.6,3.5,1.7,4.7,5.9,11.5,12.3,14.5,13.6,11.2,9.8,4.5,1.5,1.7,4.9,6.3,7.5,10.4,14.8,15.0,15.9,13.1,11.8,4.2,3.9,3.9,5.1,5.5,8.8,12.3,12.7,14.2,14.6,13.0,10.3,3.2,6.5,4.9,5.8,6.5,8.8,10.6,13.6,15.2,15.5,11.6,9.9,7.3,6.3,2.4,2.3,4.7,8.8,12.4,13.6,15.5,14.2,13.3,9.8,5.5,3.7,2.1,2.3,2.9,10.6,12.6,15.6,16.6,15.1,16.3,9.7,6.7,5.8,5.8,4.4,4.8,8.6,10.0,15.5,15.5,14.7,12.8,10.7,6.9,6.1,1.2,6.9,3.1,9.3,11.4,14.1,14.9,16.3,13.6,9.3,4.8,3.4,3.9,6.3,6.8,8.7,13.5,15.5,18.3,16.8,14.3,8.4,4.9,7.2,5.6,7.5,3.8,10.1,9.6,13.2,17.3,15.5,14.4,9.7,5.8,2.9,3.3,2.8,4.7,9.2,11.7,15.2,17.5,15.7,12.9,9.5,4.7,0.6,0.5,6.1,7.3,8.7,11.3,12.8,15.2,17.2,12.7,9.8,3.4,3.6,5.0,6.9,6.8,8.2,9.7,14.1,17.1,15.3,13
.2,8.4,7.0,5.3,5.2,1.8,5.4,7.7,9.9,14.2,16.2,15.4,11.8,8.6,6.3,5.3,5.5,3.9,6.7,9.8,10.0,13.9,17.3,15.1,13.6,10.4,5.6,0.2,6.4,2.3,5.1,8.6,12.3,14.2,14.8,16.1,14.9,8.9,5.4,4.2,3.2,4.8,4.5,7.9,9.6,14.3,17.2,16.5,12.7,11.3,6.1,6.0,5.5,6.2,4.9,7.0,9.1,15.2,14.7,15.2,11.2,9.3,7.1,4.7,4.6,5.6,5.4,8.9,11.8,15.1,16.6,16.2,13.3,10.2,3.5,0.3,0.7,3.1,4.7,5.7,8.9,12.9,13.6,14.5,12.6,8.9,4.1,0.7,0.9,5.8,6.2,7.9,10.4,13.8,15.5,16.4,14.6,7.1,5.4,5.1,1.5,3.2,5.3,7.3,11.8,13.7,16.2,13.9,12.7,7.3,8.9,3.9,5.2,6.1,7.4,8.4,11.5,13.1,15.2,14.9,12.1,9.9,5.7,3.9,4.7,5.9,1.8,8.1,10.6,13.9,14.5,15.3,13.3,9.7,5.8,4.6,6.5,5.3,6.5,7.2,11.3,14.1,16.3,17.2,14.5,9.4,5.3,4.4,2.9,5.8,4.5,7.7,8.9,13.9,16.3,13.6,12.2,7.5,5.9,3.7,2.1,1.5,4.2,7.6,10.3,13.5,15.9,15.8,13.6,11.3,6.6,1.9,2.4,3.8,3.3,6.2,9.4,15.3,17.3,15.7,11.8,7.1,4.4,2.6,3.2,1.8,3.1,6.2,10.7,13.2,13.7,14.1,12.2,7.9,7.7,4.9,3.4,2.9,4.7,7.1,12.9,15.3,15.3,14.7,12.8,8.6,6.9,3.3,5.7,3.1,6.2,7.1,11.7,13.5,14.5,14.1,14.6,9.4,5.7,0.8,1.3,3.9,3.8,6.2,9.5,14.7,15.1,14.1,14.2,9.4,5.6,4.1,2.3,3.6,2.7,7.3,11.6,13.4,14.3,15.2,12.4,7.1,6.4,1.8,2.2,4.7,7.2,10.3,13.1,15.6,16.4,17.4,12.9,9.9,5.2,4.8,3.4,5.1,6.7,9.7,9.2,13.5,15.9,14.2,11.6,9.3,7.9,5.1,0.2,1.8,5.1,8.2,12.4,14.8,15.2,15.8,15.4,7.1,7.5,3.9,4.8,4.6,6.7,9.1,11.9,16.2,16.2,14.3,13.1,6.9,4.3,3.9,1.6,5.8,6.5,7.1,10.0,15.1,16.5,16.2,12.1,9.9,7.6,4.7,6.6,4.8,4.3,8.5,10.2,13.6,15.3,16.5,15.2,11.3,7.2,7.3,4.9,5.1,5.1,7.8,9.9,15.7,17.3,17.8,13.2,8.8,8.5,2.2,4.4,2.6,3.7,8.3,10.3,14.7,17.7,15.1,13.6,9.8,7.3,7.2,3.5,2.3,4.1,8.6,11.5,13.9,18.0,15.6,13.9,9.7,4.8,3.4,4.7,1.5,6.7,7.5,8.9,13.9,14.7,14.3,12.8,9.6,6.8,4.6,4.2,7.1,7.1,6.4,11.1,13.0,15.3,14.3,13.1,10.5,6.4,3.3,4.1,3.4,4.3,8.7,10.9,13.3,17.1,15.1,12.5,9.7,5.2,3.7,3.6,5.2,6.8,7.3,10.8,14.7,17.2,14.7,12.4,7.1,4.9,4.9,5.3,3.1,5.1,7.3,10.5,14.3,15.8,16.7,13.9,10.9,7.3,3.0,3.6,2.8,6.3,7.6,10.5,12.4,14.1,14.3,13.6,9.8,6.5,4.6,2.5,5.3,4.3,6.0,12.4,14.3,15.8,14.6,12.9,12.9,7.4,3.9,3.5,2.9,3.7,8.7,11.0,11.8,14.6,15.4,11.9,10.4,4.8,3.9,3.5,5.1,5.6,7.3,11.2,14.
7,14.2,15.2,12.5,10.6,3.2,6.4,3.8,4.8,5.2,7.5,12.9,14.5,18.2,18.2,13.9,9.3,6.1,6.2,3.6,5.4,7.2,8.8,12.1,13.9,16.1,12.9,11.1,8.2,6.3,6.7,4.5,4.8,6.2,7.9,11.4,14.3,14.6,15.2,14.1,10.9,8.4,5.1,3.7,6.8,6.1,9.8,10.8,14.5,15.8,16.1,13.3,10.3,6.8,4.6,4.1,4.3,5.2,7.9,10.8,14.4,14.6,15.3,13.4,9.1,2.8,5.3,7.5,3.8,3.3,8.2,11.6,11.8,15.3,16.4,13.0,10.6,6.8,1.9,1.6,0.9,3.2,5.4,12.8,15.2,16.1,15.3,14.0,7.5,7.8,2.3,3.8,6.5,5.7,6.7,13.0,13.3,15.4,16.1,11.9,9.3,5.5,6.9,2.9,1.9,3.6,7.1,13.5,14.3,13.9,15.7,12.7,7.4,3.3,5.5,5.2,6.0,7.2,8.2,11.8,14.4,14.1,13.6,13.0,10.4,6.8,4.2,7.3,4.8,7.4,8.0,11.5,14.7,18.5,15.4,14.1,12.8,4.6,6.5,3.7,4.4,4.6,5.5,12.7,13.8,13.7,13.6,12.2,8.2,5.9,5.8,5.6,5.6,6.5,7.6,9.2,12.5,17.5,15.2,12.5,9.7,3.3,3.8,4.7,3.3,4.1,6.9,11.6,13.9,15.3,14.1,13.3,10.1,7.1,6.8,5.3,5.2,4.9,7.5,11.6,15.0,16.8,15.4,11.5,10.4,3.6,2.8,4.6,6.8,6.3,9.3,10.2,13.6,17.1,16.2,14.4,8.1,5.9,4.2,4.6,3.9,7.3,7.9,11.2,12.6,15.9,15.7,12.5,10.5,6.2,2.1,5.2,5.8,6.3,8.5,10.9,12.9,16.1,15.3,12.8,10.1,7.6,3.4,1.3,0.4,6.2,6.8,11.3,13.3,16.0,15.4,15.3,9.6,6.7,5.8,5.6,2.5,5.3,8.3,10.7,15.3,15.2,15.7,13.6,10.5,6.2,4.3,3.2,3.9,4.2,7.7,11.4,14.4,15.3,14.4,11.5,8.8,7.8,5.3,6.3,2.9,4.7,6.9,10.5,14.1,16.1,17.1,12.9,8.8,6.6,5.7,2.2,4.3,7.3,8.8,12.2,15.6,17.8,17.6,14.9,10.1,5.6,1.6,4.1,3.8,4.8,8.0,11.3,14.9,18.2,15.4,14.6,10.6,6.1,8.1,4.5,5.8,6.6,8.2,9.9,15.1,17.1,16.6,13.6,9.5,6.9,2.8,3.7,2.6,7.1,6.3,11.5,14.7,15.3,16.1,14.4,9.4,5.5,5.3,5.2,5.6,3.6,9.2,12.2,14.1,16.1,16.9,13.4,10.4,5.1,3.0,5.7,5.1,9.1,7.6,10.7,14.4,15.2,16.3,13.8,10.5,9.4,4.4,4.2,5.6,5.8,8.8,11.4,14.2,15.5,16.4,14.2,8.2,8.7,3.2,1.4,2.6,6.0,8.7,12.5,16.4,15.1,15.6,12.8,9.6,6.9,3.8,0.5,3.5,5.1,6.4,9.4,15.1,17.3,14.7,14.5,10.4,6.6,5.6,0.9,0.1,5.2,9.2,11.1,14.4,15.5,16.6,13.6,10.4,4.9,6.7,4.9,6.1,6.5,10.5,11.8,14.4,16.4,16.1,13.3,10.6,6.3,3.5,5.8,3.6,5.2,10.2,11.4,13.5,16.5,17.0,12.5,9.3,6.2,3.6,0.4,7.1,7.9,10.1,12.2,14.6,16.7,15.9,14.4,11.9,7.2,4.9,2.7,5.9,5.1,9.9,10.7,13.1,16.3,14.7,14.0,9.8,8.1,3.1,2.2,1.9,3.6,8.6,13.5,15.5,17.0,18.6,14.9,10.
6,7.2,5.1,5.4,4.7,8.3,9.0,11.4,13.5,15.8,15.1,13.8,10.1,7.3,5.7,5.5,5.7,5.1,10.0,11.2,15.3,17.4,16.8,16.3,11.7,6.6,5.8,4.2,5.3,7.4,7.6,11.3,16.2,15.9,15.6,12.9,9.6,5.7,1.2,3.9,3.7,4.1,6.8,10.1,14.0,16.3,14.8,14.1,9.4,8.5,5.5,2.7,3.4,6.6,9.6,13.4,14.4,16.8,15.8,10.7,8.8,4.1,2.8,3.3,4.3,5.6,7.3,12.6,14.4,15.5,16.2,13.8,9.7,8.5,6.9,2.9,2.6,5.8,7.6,11.2,13.4,14.2,14.6,12.7,11.9,6.9,6.8,2.6,1.2,3.2,9.3,9.7,13.8,17.7,18.1,14.2,9.2,7.0,5.4,3.6,0.2,6.2,6.9,11.7,13.1,15.8,13.5,14.3,9.4,6.0,5.7,5.5,5.3,9.2,8.9,10.3,15.2,16.3,15.4,12.5,10.8,6.4,4.5,3.4,4.7,3.7,7.4,11.1,14.1,15.9,15.8,15.1,10.8,6.4,4.7,1.6,4.4,7.3,9.4,12.8,15.2,17.3,17.2,14.9,12.6,7.1,6.0,3.8,4.1,6.4,8.9,12.8,16.1,15.1,15.0,13.1,10.3,7.3,3.9,3.9,6.9,8.2,10.0,11.0,14.4,15.2,15.4,15.2,10.9,6.0,2.2,4.3,4.4,2.8,7.7,10.3,13.7,15.1,14.5,12.6,10.4,5.5,1.8,2.1,0.7,6.0,8.7,10.6,14.9,15.2,14.3,12.9,11.1,8.2,2.6,3.4,4.5,4.3,8.7,13.3,13.8,16.1,15.5,14.1,8.9,7.4,3.6,3.3,3.1,5.2,8.0,11.7,14.7,14.0,14.9,12.3,11.0,4.5,4.7,2.9,5.7,6.5,7.2,11.0,15.4,15.0,14.7,13.8,10.1,5.6,5.5,4.5,5.4,7.0,7.7,10.4,14.0,16.7,15.7,13.5,10.8,5.4,4.2,4.4,1.9,6.3,8.1,9.8,14.8,15.0,15.4,13.9,12.5,6.5,3.0,5.5,1.0,3.3,7.4,11.2,13.9,16.8,16.4,13.9,13.0,5.4,3.3,3.7,2.9,3.7,6.7,13.0,16.4,15.2,16.0,14.4,10.7,7.8,4.3];

        // spot test against values taken from the reference implementation
        let test = asap_smooth(&data, 100);
        assert_eq!(test.len(), 93);
        assert!((test[10] - 9.021034).abs() < 0.000001);
        assert!((test[20] - 9.19).abs() < 0.000001);
        assert!((test[30] - 9.068966).abs() < 0.000001);
        assert!((test[40] - 9.237586).abs() < 0.000001);
        assert!((test[50] - 9.145172).abs() < 0.000001);
        assert!((test[60] - 9.014483).abs() < 0.000001);
        assert!((test[70] - 9.293448).abs() < 0.000001);
        assert!((test[80] - 9.417931).abs() < 0.000001);
        assert!((test[90] - 9.602069).abs() < 0.000001);
    }
}


================================================
FILE: crates/count-min-sketch/Cargo.toml
================================================
[package]
name = "countminsketch"
version = "0.1.0"
edition = "2021"

[dependencies]
rand = "0.8.4"
serde = { version = "1.0", features = ["derive"] }

================================================
FILE: crates/count-min-sketch/src/lib.rs
================================================
//! Count-Min Sketch implementation in Rust
//!
//! Based on the paper:
//! <http://dimacs.rutgers.edu/~graham/pubs/papers/cm-full.pdf>

use std::{
    fmt,
    hash::{Hash, Hasher},
};

#[allow(deprecated)]
use std::hash::SipHasher;

use serde::{Deserialize, Serialize};

/// The CountMinHashFn is a data structure used to hash items that are being
/// added to a Count-Min Sketch.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[repr(C)]
pub struct CountMinHashFn {
    // First SipHash key; the second key is the fixed module-level SEED.
    key: u64,
}

const SEED: u64 = 0x517cc1b727220a95; // from FxHash

impl CountMinHashFn {
    /// Creates a new CountMinHashFn whose hash function key is equal to `key`.
    pub fn with_key(key: u64) -> Self {
        Self { key }
    }

    /// Computes the hash of `item` according to the hash function and returns
    /// the bucket index corresponding to the hashed value.
    ///
    /// The returned value will be between 0 and (`nbuckets` - 1).
    #[allow(deprecated)]
    pub fn hash_into_buckets<T: Hash>(&self, item: &T, nbuckets: usize) -> usize {
        let (key1, key2) = (self.key, SEED);
        let mut hasher = SipHasher::new_with_keys(key1, key2);
        item.hash(&mut hasher);
        let hash_val = hasher.finish();
        (hash_val % (nbuckets as u64)) as usize
    }

    /// Returns the key for the hash function.
    pub(crate) fn key(&self) -> u64 {
        self.key
    }
}

/// The Count-Min Sketch is a compact summary data structure capable of
/// representing a high-dimensional vector and answering queries on this vector,
/// in particular point queries and dot product queries, with strong accuracy
/// guarantees. Such queries are at the core of many computations, so the
/// structure can be used in order to answer a variety of other queries, such as
/// frequent items (heavy hitters), quantile finding, join size estimation, and
/// more. Since the data structure can easily process updates in the form of
/// additions or subtractions to dimensions of the vector (which may correspond
/// to insertions or deletions, or other transactions), it is capable of working
/// over streams of updates, at high rates.[1]
///
/// [1]: <http://dimacs.rutgers.edu/~graham/pubs/papers/cmencyc.pdf>
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CountMinSketch {
    // Number of buckets per row.
    width: usize,
    // Number of rows (one hash function per row).
    depth: usize,
    // hashfuncs must be at least `depth` in length
    hashfuncs: Vec<CountMinHashFn>,
    // The outer and inner `Vec`s must be `depth` and `width` long, respectively
    counters: Vec<Vec<i64>>,
}

impl CountMinSketch {
    /// Constructs a new Count-Min Sketch with the specified dimensions, using
    /// `hashfuncs` to construct the underlying hash functions and `counters` to
    /// populate the sketch with any data.
    ///
    /// # Panics
    /// Panics if `hashfuncs` or `counters` does not have exactly `depth`
    /// entries, or if any counter row is not exactly `width` wide.
    pub fn new(
        width: usize,
        depth: usize,
        hashfuncs: Vec<CountMinHashFn>,
        counters: Vec<Vec<i64>>,
    ) -> Self {
        assert_eq!(hashfuncs.len(), depth);
        assert_eq!(counters.len(), depth);
        // Validate every row, not just the first: checking only
        // `counters[0].len()` missed ragged inputs and panicked with an
        // unhelpful index-out-of-bounds error when `depth == 0`.
        for row in &counters {
            assert_eq!(row.len(), width);
        }
        Self {
            width,
            depth,
            hashfuncs,
            counters,
        }
    }

    /// Constructs a new, empty Count-Min Sketch with the specified dimensions,
    /// using `keys` to seed the underlying hash functions.
    pub fn with_dims_and_hashfn_keys(width: usize, depth: usize, keys: Vec<u64>) -> Self {
        assert_eq!(keys.len(), depth);
        Self {
            width,
            depth,
            hashfuncs: keys
                .iter()
                .map(|key| CountMinHashFn::with_key(*key))
                .collect(),
            counters: vec![vec![0; width]; depth],
        }
    }

    /// Constructs a new, empty Count-Min Sketch with the specified dimensions.
    /// The hash function keys default to `1..=depth`.
    pub fn with_dim(width: usize, depth: usize) -> Self {
        let keys = (1..=depth).map(|k| k as u64).collect::<Vec<_>>();
        CountMinSketch::with_dims_and_hashfn_keys(width, depth, keys)
    }

    /// Constructs a new, empty Count-Min Sketch whose dimensions will be
    /// derived from the parameters.
    ///
    /// Then for any element *i*, an estimate of its count, âᵢ, will have the
    /// guarantee:
    ///         aᵢ ≤ âᵢ ≤ aᵢ + ϵN    with probability 1-δ
    /// where aᵢ is the true count of element *i*
    ///
    /// Thus `epsilon` controls the error of the estimated count, relative to
    /// the total number of items seen, and `delta` determines the probability
    /// that the estimate will exceed the true count beyond the epsilon error
    /// term.
    ///
    /// To accommodate this result, the sketch will have a width of ⌈e/ε⌉ and a
    /// depth of ⌈ln(1/δ)⌉.
    pub fn with_prob(epsilon: f64, delta: f64) -> Self {
        assert!(0.0 < epsilon && epsilon < 1.0);
        assert!(0.0 < delta && delta < 1.0);
        // `std::f64::consts::E` states the documented ⌈e/ε⌉ more directly
        // than the original `1f64.exp()`.
        let width = (std::f64::consts::E / epsilon).ceil() as usize;
        let depth = (1f64 / delta).ln().ceil() as usize;
        CountMinSketch::with_dim(width, depth)
    }

    /// Returns the width of the sketch.
    pub fn width(&self) -> usize {
        self.width
    }

    /// Returns the depth of the sketch.
    pub fn depth(&self) -> usize {
        self.depth
    }

    /// Returns a vector containing the keys of the hash functions used with the
    /// sketch.
    pub fn hash_keys(&self) -> Vec<u64> {
        self.hashfuncs.iter().map(|f| f.key()).collect()
    }

    /// Returns a nested vector representing the sketch's counter table. Each
    /// element in the outer vector corresponds to a row of the counter table,
    /// and each element of the inner vector corresponds to the tally in that
    /// bucket for a given row.
    pub fn counters(&self) -> &Vec<Vec<i64>> {
        &self.counters
    }

    /// Returns an estimate of the number of times `item` has been seen by the
    /// sketch (the minimum tally across all rows).
    ///
    /// # Panics
    /// Panics if the sketch was constructed with `depth == 0`.
    pub fn estimate<T: Hash>(&self, item: T) -> i64 {
        let buckets = self
            .hashfuncs
            .iter()
            .map(|h| h.hash_into_buckets(&item, self.width));

        self.counters
            .iter()
            .zip(buckets)
            .map(|(counter, bucket)| counter[bucket])
            .min()
            .unwrap()
    }

    /// Returns a vector of the indices for the buckets into which `item` hashes.
    ///
    /// The vector will have `self.depth` elements, each in the range
    /// [0, self.width-1].
    pub fn get_bucket_indices<T: Hash>(&self, item: T) -> Vec<usize> {
        self.hashfuncs
            .iter()
            .map(|h| h.hash_into_buckets(&item, self.width))
            .collect()
    }

    /// Adds the given `item` to the sketch (increments one bucket per row).
    pub fn add_value<T: Hash>(&mut self, item: T) {
        for i in 0..self.depth {
            let bucket = self.hashfuncs[i].hash_into_buckets(&item, self.width);
            self.counters[i][bucket] += 1;
        }
    }

    /// Subtract the given `item` from the sketch (decrements one bucket per row).
    pub fn subtract_value<T: Hash>(&mut self, item: T) {
        for i in 0..self.depth {
            let bucket = self.hashfuncs[i].hash_into_buckets(&item, self.width);
            self.counters[i][bucket] -= 1;
        }
    }

    /// Includes the counts from `other` into `self` via elementwise addition of
    /// the counter vectors.
    ///
    /// The underlying `CountMinHashFn`s in each sketch must have the same keys.
    ///
    /// # Panics
    /// Panics if the two sketches differ in width, depth, or hash keys.
    pub fn combine(&mut self, other: CountMinSketch) {
        assert_eq!(self.width, other.width);
        assert_eq!(self.depth, other.depth);
        assert_eq!(self.hashfuncs, other.hashfuncs);
        for (counter1, counter2) in self.counters.iter_mut().zip(other.counters) {
            for (val1, val2) in counter1.iter_mut().zip(counter2) {
                *val1 += val2;
            }
        }
    }
}

impl fmt::Display for CountMinSketch {
    /// Renders the sketch as an ASCII table: one column per bucket, one row
    /// of counts per hash function, with the bucket indices as a header row.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Pre-built horizontal rules; every cell is 9 characters wide.
        let light_rule = "--------+".repeat(self.width);
        let heavy_rule = "========+".repeat(self.width);

        writeln!(f, "Count-Min Sketch:")?;
        writeln!(f, "+------++{light_rule}")?;

        // Header row: bucket indices.
        write!(f, "|      ||")?;
        for bucket in 0..self.width {
            write!(f, "    {bucket:>3} |")?;
        }
        writeln!(f)?;

        writeln!(f, "+======++{heavy_rule}")?;

        // One row of counts per hash function.
        for row in 0..self.depth {
            write!(f, "|  {row:>3} ||")?;
            for count in &self.counters[row] {
                write!(f, " {count:>6} |")?;
            }
            writeln!(f)?;
        }

        writeln!(f, "+------++{light_rule}")
    }
}


================================================
FILE: crates/count-min-sketch/tests/lib.rs
================================================
use countminsketch::CountMinSketch;

#[test]
fn empty_sketch() {
    // A sketch that has seen nothing estimates zero for any item.
    let sketch = CountMinSketch::with_dim(1, 1);
    assert_eq!(0, sketch.estimate("foo"));
}

#[test]
fn add_once() {
    // A single insertion is reported exactly.
    let mut sketch = CountMinSketch::with_dim(2, 2);
    sketch.add_value("foo");
    assert_eq!(1, sketch.estimate("foo"));
}

#[test]
fn subtract_is_inverse_of_add() {
    // Subtracting an item undoes a previous add, returning to zero.
    let mut sketch = CountMinSketch::with_dim(2, 2);
    sketch.add_value("foo");
    sketch.subtract_value("foo");
    assert_eq!(0, sketch.estimate("foo"));
}

#[test]
fn add_repeated() {
    // With only one distinct item there are no collisions, so the estimate
    // is exact no matter how many insertions occur.
    const N: i64 = 100_000;
    let mut sketch = CountMinSketch::with_dim(2, 2);
    for _ in 0..N {
        sketch.add_value("foo");
    }
    assert_eq!(N, sketch.estimate("foo"));
}

#[test]
fn add_repeated_with_collisions() {
    // if sketch has width = 2 and we add 3 items, then we
    // are guaranteed that we will have at least one hash
    // collision in every row
    let mut cms = CountMinSketch::with_dim(2, 5);

    for _ in 0..100_000 {
        cms.add_value("foo")
    }

    for _ in 0..1_000 {
        cms.add_value("bar")
    }

    for _ in 0..1_000_000 {
        cms.add_value("baz")
    }

    let foo_est = cms.estimate("foo");
    let bar_est = cms.estimate("bar");
    let baz_est = cms.estimate("baz");

    let err_margin = (0.01 * (100_000f64 + 1_000f64 + 1_000_000f64)) as i64;
    assert!(100_000 <= foo_est && foo_est < (100_000 + err_margin));
    assert!(1_000 <= bar_est && bar_est < (1_000 + err_margin));
    assert!(1_000_000 <= baz_est && baz_est < (1_000_000 + err_margin));
}


================================================
FILE: crates/counter-agg/Cargo.toml
================================================
[package]
name = "counter-agg"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
flat_serialize = {path="../flat_serialize/flat_serialize"}
flat_serialize_macro = {path="../flat_serialize/flat_serialize_macro"}
serde = { version = "1.0", features = ["derive"] }
stats_agg = {path="../stats-agg"}
tspoint = {path="../tspoint"}

[dev-dependencies]
approx = "0.5.1"

================================================
FILE: crates/counter-agg/src/lib.rs
================================================
use serde::{Deserialize, Serialize};
use stats_agg::{stats2d::StatsSummary2D, XYPair};
use std::fmt;
use tspoint::TSPoint;

pub mod range;

#[cfg(test)]
mod tests;

/// Errors that can arise while building or analyzing a metric summary.
#[derive(Debug, PartialEq, Eq)]
pub enum CounterError {
    /// Points (or summaries being combined) were not supplied in strictly
    /// increasing time order.
    OrderError,
    /// The summary's bounds are absent, infinite, or do not contain the
    /// summary's first and last timestamps, so a Prometheus-style
    /// delta/rate cannot be computed.
    BoundsInvalid,
}

// TODO Intent is for this to be immutable with mutations going through (and
//  internal consistency protected by) the builders below.  But, we allow raw
//  access to the extension to allow it to (de)serialize, so the separation is
//  but a fiction for now.  If the only consequence of corruption is
//  nonsensical results rather than unsound behavior, garbage in garbage out.
//  But much better if we can validate at deserialization.  We can do that in
//  the builder if we want.
/// Rolling summary of a single metric series: the boundary points needed for
/// delta/rate math, counter-reset bookkeeping, and 2-D regression statistics.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MetricSummary {
    // TODO invariants?
    /// Earliest point accumulated into the summary.
    pub first: TSPoint,
    /// Second-earliest point; equals `first` while only one point is held.
    pub second: TSPoint,
    /// Second-latest point; equals `first` while only one point is held.
    pub penultimate: TSPoint,
    /// Latest point accumulated into the summary.
    pub last: TSPoint,
    // Invariant (see `reset` below): reset_sum only grows when a reset is
    // recorded, so reset_sum > 0 implies num_resets > 0 (for non-negative
    // counter values).
    // NOTE(review): the original comment also claimed the reverse
    // implications and a symmetric coupling between num_resets and
    // num_changes; the visible code does not enforce those (values can
    // change without a reset, and a reset from value 0 adds nothing to
    // reset_sum) — confirm before relying on them.
    pub reset_sum: f64,
    pub num_resets: u64,
    pub num_changes: u64,
    // TODO Protect from deserialization?  Is there any risk other than giving
    //  nonsensical results?  If so, maybe it's fine to just accept garbage
    //  out upon garbage in.
    pub stats: StatsSummary2D<f64>,
    // TODO See TODOs in I64Range about protecting from deserialization.
    pub bounds: Option<range::I64Range>,
}

// Note that this can lose fidelity with the timestamp, but only in the
// sub-microsecond range, which is likely acceptable in most applications.
// If you need better regression analysis at the subsecond level, you can
// subtract a common nearby value from all your times before aggregating and
// add it back afterwards; the regression analysis is unchanged by such a shift.
// We convert the timestamp into seconds rather than microseconds here so that
// the slope, and any other regression analysis, is done on a per-second basis:
// the slope will be the per-second slope, not the per-microsecond slope. Any
// x-intercept value must be converted back to microseconds to get a timestamp.
/// Converts a time-series point into an (x, y) pair for regression:
/// x is the timestamp in seconds, y is the raw value.
fn ts_to_xy(pt: TSPoint) -> XYPair<f64> {
    XYPair {
        x: to_seconds(pt.ts as f64),
        y: pt.val,
    }
}

/// Converts a time value in microseconds to seconds.
fn to_seconds(t: f64) -> f64 {
    // By default postgres timestamps have microsecond precision.
    const MICROS_PER_SECOND: f64 = 1_000_000.0;
    t / MICROS_PER_SECOND
}

/// MetricSummary tracks monotonically increasing counters that may reset, ie every time the value decreases
/// it is treated as a reset of the counter and the previous value is added to the "true value" of the
/// counter at that timestamp.
impl MetricSummary {
    /// Creates a summary holding the single point `pt`, with optional
    /// validity bounds. All four boundary points start out equal to `pt` and
    /// the regression stats are seeded with it.
    pub fn new(pt: &TSPoint, bounds: Option<range::I64Range>) -> MetricSummary {
        let mut n = MetricSummary {
            first: *pt,
            second: *pt,
            penultimate: *pt,
            last: *pt,
            reset_sum: 0.0,
            num_resets: 0,
            num_changes: 0,
            stats: StatsSummary2D::new(),
            bounds,
        };
        n.stats.accum(ts_to_xy(*pt)).unwrap();
        n
    }

    /// Records a counter reset when `incoming` dips below the last value:
    /// the pre-reset level is folded into `reset_sum` so later points can be
    /// rebased onto a monotone scale. `CounterSummaryBuilder` calls this
    /// before adding/combining; gauges never do.
    fn reset(&mut self, incoming: &TSPoint) {
        if incoming.val < self.last.val {
            self.reset_sum += self.last.val;
            self.num_resets += 1;
        }
    }

    // expects time-ordered input
    /// Folds one point into the summary. Returns `OrderError` for a point
    /// earlier than `last`; keeps only the first point seen for any given
    /// timestamp.
    fn add_point(&mut self, incoming: &TSPoint) -> Result<(), CounterError> {
        if incoming.ts < self.last.ts {
            return Err(CounterError::OrderError);
        }
        //TODO: test this
        if incoming.ts == self.last.ts {
            // if two points are equal we only use the first we see
            // see discussion at https://github.com/timescale/timescaledb-toolkit/discussions/65
            return Ok(());
        }
        // right now we treat a counter reset that goes to exactly zero as a change (not sure that's correct, but it seems defensible)
        // These values are not rounded, so direct comparison is valid.
        if incoming.val != self.last.val {
            self.num_changes += 1;
        }
        // first == second means only one point has been accumulated so far
        if self.first == self.second {
            self.second = *incoming;
        }
        self.penultimate = self.last;
        self.last = *incoming;
        // rebase the value by the accumulated resets before it enters the stats
        let mut incoming_xy = ts_to_xy(*incoming);
        incoming_xy.y += self.reset_sum;
        self.stats.accum(incoming_xy).unwrap();
        Ok(())
    }

    /// True while the summary holds exactly one point.
    fn single_value(&self) -> bool {
        self.last == self.first
    }

    // combining can only happen for disjoint time ranges
    /// Merges `incoming` (which must lie strictly after `self` in time) into
    /// `self`, stitching together boundary points, reset/change counts, and
    /// regression statistics.
    fn combine(&mut self, incoming: &MetricSummary) -> Result<(), CounterError> {
        // this requires that self comes before incoming in time order
        if self.last.ts >= incoming.first.ts {
            return Err(CounterError::OrderError);
        }

        // These values are not rounded, so direct comparison is valid.
        if self.last.val != incoming.first.val {
            self.num_changes += 1;
        }

        if incoming.single_value() {
            self.penultimate = self.last;
        } else {
            self.penultimate = incoming.penultimate;
        }
        if self.single_value() {
            self.second = incoming.first;
        }
        let mut stats = incoming.stats;
        // have to offset based on our reset_sum, including the amount we added based on any resets that happened at the boundary (but before we add in the incoming reset_sum)
        stats
            .offset(XYPair {
                x: 0.0,
                y: self.reset_sum,
            })
            .unwrap();
        self.last = incoming.last;
        self.reset_sum += incoming.reset_sum;
        self.num_resets += incoming.num_resets;
        self.num_changes += incoming.num_changes;

        self.stats = self.stats.combine(stats).unwrap();
        self.bounds_extend(incoming.bounds);
        Ok(())
    }

    /// Elapsed time between the first and last point, in seconds.
    pub fn time_delta(&self) -> f64 {
        to_seconds((self.last.ts - self.first.ts) as f64)
    }

    /// Total increase over the summary, with counter resets folded back in.
    pub fn delta(&self) -> f64 {
        self.last.val + self.reset_sum - self.first.val
    }

    /// Average per-second rate over the whole summary, or `None` for a
    /// single point (no elapsed time to divide by).
    pub fn rate(&self) -> Option<f64> {
        if self.single_value() {
            return None;
        }
        Some(self.delta() / self.time_delta())
    }

    /// Instantaneous delta at the left edge (first -> second point).
    pub fn idelta_left(&self) -> f64 {
        //check for counter reset
        if self.second.val >= self.first.val {
            self.second.val - self.first.val
        } else {
            self.second.val // counter reset assumes it reset at the previous point, so we just return the second point
        }
    }

    /// Instantaneous delta at the right edge (penultimate -> last point).
    pub fn idelta_right(&self) -> f64 {
        //check for counter reset; same convention as idelta_left
        if self.last.val >= self.penultimate.val {
            self.last.val - self.penultimate.val
        } else {
            self.last.val
        }
    }

    /// Instantaneous per-second rate at the left edge; `None` for a single point.
    pub fn irate_left(&self) -> Option<f64> {
        if self.single_value() {
            None
        } else {
            Some(self.idelta_left() / to_seconds((self.second.ts - self.first.ts) as f64))
        }
    }

    /// Instantaneous per-second rate at the right edge; `None` for a single point.
    pub fn irate_right(&self) -> Option<f64> {
        if self.single_value() {
            None
        } else {
            Some(self.idelta_right() / to_seconds((self.last.ts - self.penultimate.ts) as f64))
        }
    }

    /// True when the bounds (if any) contain both the first and last timestamps.
    pub fn bounds_valid(&self) -> bool {
        match self.bounds {
            None => true, // unbounded contains everything
            Some(b) => b.contains(self.last.ts) && b.contains(self.first.ts),
        }
    }

    /// Widens `self.bounds` to also cover `in_bounds`: adopt the incoming
    /// bounds if we have none, keep ours if the incoming are absent,
    /// otherwise extend ours over the incoming range.
    fn bounds_extend(&mut self, in_bounds: Option<range::I64Range>) {
        match (self.bounds, in_bounds) {
            (None, _) => self.bounds = in_bounds,
            (_, None) => {}
            (Some(mut a), Some(b)) => {
                a.extend(&b);
                self.bounds = Some(a);
            }
        };
    }

    // based on:  https://github.com/timescale/promscale_extension/blob/d51a0958442f66cb78d38b584a10100f0d278298/src/lib.rs#L208,
    // which is based on:     // https://github.com/prometheus/prometheus/blob/e5ffa8c9a08a5ee4185271c8c26051ddc1388b7a/promql/functions.go#L59
    /// Prometheus-style extrapolated delta over the summary's bounds.
    /// Errors when bounds are missing, infinite, or don't cover the data;
    /// returns `Ok(None)` when there are fewer than two points.
    pub fn prometheus_delta(&self) -> Result<Option<f64>, CounterError> {
        if self.bounds.is_none() || !self.bounds_valid() || self.bounds.unwrap().has_infinite() {
            return Err(CounterError::BoundsInvalid);
        }
        //must have at least 2 values
        if self.single_value() || self.bounds.unwrap().is_singleton() {
            //technically, the is_singleton check is redundant, it's included for clarity (any singleton bound that is valid can only be one point)
            return Ok(None);
        }

        let mut result_val = self.delta();

        // all calculated durations in seconds in Prom implementation, so we'll do that here.
        // we can unwrap all of the bounds accesses as they are guaranteed to be there from the checks above
        let mut duration_to_start =
            to_seconds((self.first.ts - self.bounds.unwrap().left.unwrap()) as f64);

        /* bounds stores [L,H), but Prom takes the duration using the inclusive range [L, H-1ms]. Subtract an extra ms, ours is in microseconds. */
        let duration_to_end =
            to_seconds((self.bounds.unwrap().right.unwrap() - self.last.ts - 1_000) as f64);
        let sampled_interval = self.time_delta();
        let avg_duration_between_samples = sampled_interval / (self.stats.n - 1) as f64; // don't have to worry about divide by zero because we know we have at least 2 values from the above.

        // we don't want to extrapolate to negative counter values, so we calculate the duration to the zero point of the counter (based on what we know here) and set that as duration_to_start if it's smaller than duration_to_start
        if result_val > 0.0 && self.first.val >= 0.0 {
            let duration_to_zero = sampled_interval * (self.first.val / result_val);
            if duration_to_zero < duration_to_start {
                duration_to_start = duration_to_zero;
            }
        }

        // If the first/last samples are close to the boundaries of the range,
        // extrapolate the result. This is as we expect that another sample
        // will exist given the spacing between samples we've seen thus far,
        // with an allowance for noise.
        // Otherwise, we extrapolate to one half the avg distance between samples...
        // this was empirically shown to be good for certain things and was discussed at length in: https://github.com/prometheus/prometheus/pull/1161

        let extrapolation_threshold = avg_duration_between_samples * 1.1;
        let mut extrapolate_to_interval = sampled_interval;

        if duration_to_start < extrapolation_threshold {
            extrapolate_to_interval += duration_to_start
        } else {
            extrapolate_to_interval += avg_duration_between_samples / 2.0
        }

        if duration_to_end < extrapolation_threshold {
            extrapolate_to_interval += duration_to_end
        } else {
            extrapolate_to_interval += avg_duration_between_samples / 2.0
        }
        result_val *= extrapolate_to_interval / sampled_interval;
        Ok(Some(result_val))
    }

    /// Prometheus-style extrapolated per-second rate: `prometheus_delta`
    /// divided by the (inclusive) bounds duration.
    pub fn prometheus_rate(&self) -> Result<Option<f64>, CounterError> {
        let delta = self.prometheus_delta()?;
        if delta.is_none() {
            return Ok(None);
        }
        let delta = delta.unwrap();
        let bounds = self.bounds.unwrap(); // if we got through delta without error then we have bounds
        let duration = bounds.duration().unwrap() - 1_000; // bounds stores [L,H), but Prom takes the duration using the inclusive range [L, H-1ms]. So subtract an extra ms from the duration
        if duration <= 0 {
            return Ok(None); // if we have a total duration under a ms, it's less than prom could deal with so we return none.
        }
        Ok(Some(delta / to_seconds(duration as f64))) // don't have to deal with 0 case because that is checked in delta as well (singleton)
    }
}

impl fmt::Display for CounterError {
    /// Writes a human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        let msg = match self {
            CounterError::OrderError => {
                "out of order points: points must be submitted in time-order"
            }
            CounterError::BoundsInvalid => "cannot calculate delta without valid bounds",
        };
        write!(f, "{}", msg)
    }
}

/// Mutation wrapper around `MetricSummary` for gauge data: values may move
/// freely in either direction, so no counter-reset tracking is performed.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct GaugeSummaryBuilder(MetricSummary);

impl GaugeSummaryBuilder {
    pub fn new(pt: &TSPoint, bounds: Option<range::I64Range>) -> Self {
        Self(MetricSummary::new(pt, bounds))
    }

    /// expects time-ordered input
    pub fn add_point(&mut self, incoming: &TSPoint) -> Result<(), CounterError> {
        self.0.add_point(incoming)
    }

    /// combining can only happen for disjoint time ranges
    pub fn combine(&mut self, incoming: &MetricSummary) -> Result<(), CounterError> {
        self.0.combine(incoming)
    }

    pub fn set_bounds(&mut self, bounds: Option<range::I64Range>) {
        self.0.bounds = bounds;
    }

    pub fn build(self) -> MetricSummary {
        self.0
    }

    pub fn first(&self) -> &TSPoint {
        &self.0.first
    }

    // TODO build method should check validity rather than caller
    pub fn bounds_valid(&self) -> bool {
        self.0.bounds_valid()
    }
}

impl From<MetricSummary> for GaugeSummaryBuilder {
    fn from(summary: MetricSummary) -> Self {
        Self(summary)
    }
}

/// Mutation wrapper around `MetricSummary` for counter data: any decrease in
/// value is treated as a counter reset and tracked via `MetricSummary::reset`.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct CounterSummaryBuilder(MetricSummary);

impl CounterSummaryBuilder {
    pub fn new(pt: &TSPoint, bounds: Option<range::I64Range>) -> Self {
        Self(MetricSummary::new(pt, bounds))
    }

    /// expects time-ordered input
    pub fn add_point(&mut self, incoming: &TSPoint) -> Result<(), CounterError> {
        self.0.reset(incoming);
        self.0.add_point(incoming)
    }

    /// combining can only happen for disjoint time ranges
    pub fn combine(&mut self, incoming: &MetricSummary) -> Result<(), CounterError> {
        self.0.reset(&incoming.first);
        self.0.combine(incoming)
    }

    pub fn set_bounds(&mut self, bounds: Option<range::I64Range>) {
        self.0.bounds = bounds;
    }

    pub fn build(self) -> MetricSummary {
        self.0
    }

    pub fn first(&self) -> &TSPoint {
        &self.0.first
    }

    // TODO build method should check validity rather than caller
    pub fn bounds_valid(&self) -> bool {
        self.0.bounds_valid()
    }
}

impl From<MetricSummary> for CounterSummaryBuilder {
    fn from(summary: MetricSummary) -> Self {
        Self(summary)
    }
}


================================================
FILE: crates/counter-agg/src/range.rs
================================================
use serde::{Deserialize, Serialize};
use std::cmp::{max, min};

// We always store ranges as half open: inclusive on the left, exclusive on
// the right, i.e. [L, R). Because this is a discrete (i64) type, translating
// is simple; the convention enforces equality between ranges such as
// [0, 10) and [0, 9].
// A `None` value denotes an infinite bound on that side.
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
#[repr(C)]
pub struct I64Range {
    pub left: Option<i64>,
    pub right: Option<i64>,
}

impl I64Range {
    /// True when either side of the range is unbounded.
    pub fn has_infinite(&self) -> bool {
        matches!((self.left, self.right), (None, _) | (_, None))
    }

    // TODO See TODO below about range validity.  Right now we don't care
    //  much.  If we start to care, move the caring to `new` and `extend`
    //  methods.  That will allow this crate to protect the integrity of
    //  MetricSummary and I64Range in the face of the extension needing to be
    //  able to construct them from raw (and therefore potentially
    //  corrupt) inputs.
    /// A range is valid unless both bounds are finite and reversed.
    fn is_valid(&self) -> bool {
        self.left.zip(self.right).map_or(true, |(l, r)| l <= r)
    }

    /// True for a finite range whose bounds coincide, e.g. [2, 2).
    pub fn is_singleton(&self) -> bool {
        self.left.zip(self.right).map_or(false, |(l, r)| l == r)
    }

    /// Grows `self` to cover `other`: the new left bound is the smaller of
    /// the two lefts and the new right the larger of the two rights, with an
    /// infinite (`None`) side on either input staying infinite.
    // TODO: What should extend do with invalid ranges on either side? right
    // now it treats them as if they are real...
    pub fn extend(&mut self, other: &Self) {
        // `Option::zip` yields `None` when either side is `None`, which is
        // exactly the infinite-absorbs-finite behavior we want.
        self.left = self.left.zip(other.left).map(|(a, b)| min(a, b));
        self.right = self.right.zip(other.right).map(|(a, b)| max(a, b));
    }

    /// Membership test under the [left, right) convention; an infinite side
    /// admits everything on that side. A reversed range contains nothing.
    pub fn contains(&self, pt: i64) -> bool {
        let left_ok = self.left.map_or(true, |l| pt >= l);
        let right_ok = self.right.map_or(true, |r| pt < r);
        left_ok && right_ok
    }

    // TODO(original): a range-contains-range variant was sketched here but
    // never implemented.
    /// Width of the range, or `None` when a bound is infinite or the range
    /// is invalid (reversed).
    pub fn duration(&self) -> Option<i64> {
        match (self.left, self.right) {
            (Some(l), Some(r)) if l <= r => Some(r - l),
            _ => None,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_extend() {
        fn rng(left: Option<i64>, right: Option<i64>) -> I64Range {
            I64Range { left, right }
        }

        let mut a = rng(Some(4), Some(5));
        let b = rng(Some(3), Some(6));
        a.extend(&b);
        // b completely covers a
        assert_eq!(a, b);

        // extend to the left
        let c = rng(Some(2), Some(5));
        a.extend(&c);
        assert_eq!(a, rng(Some(2), Some(6)));

        // extend to the right
        a.extend(&rng(Some(6), Some(9)));
        assert_eq!(a, rng(Some(2), Some(9)));

        // infinite bounds swallow finite ones
        a.extend(&rng(Some(10), None));
        assert_eq!(a, rng(Some(2), None));
        a.extend(&rng(None, Some(5)));
        assert_eq!(a, rng(None, None));

        // if a range contains another, it's unaffected
        a.extend(&c);
        assert_eq!(a, rng(None, None));

        // whether infinite or not
        let mut a = rng(Some(2), Some(9));
        a.extend(&b);
        assert_eq!(a, rng(Some(2), Some(9)));

        // invalid (reversed) ranges are currently extended as normal, though
        // each can only ever push out a single side
        a.extend(&rng(Some(-2), Some(-9)));
        assert_eq!(a, rng(Some(-2), Some(9)));
        a.extend(&rng(Some(20), Some(10)));
        assert_eq!(a, rng(Some(-2), Some(10)));

        // extending a reversed range can produce another reversed range...
        let mut weird = rng(Some(-2), Some(-9));
        weird.extend(&rng(Some(-6), Some(-10)));
        assert_eq!(weird, rng(Some(-6), Some(-9)));

        // ...or a valid one, even from two reversed inputs
        weird.extend(&rng(Some(6), Some(3)));
        assert_eq!(weird, rng(Some(-6), Some(3)));
        assert!(weird.is_valid());

        // extending with a valid range always produces a valid range
        let mut weird = rng(Some(-6), Some(-9));
        weird.extend(&rng(Some(2), Some(9)));
        assert_eq!(weird, rng(Some(-6), Some(9)));
    }

    #[test]
    fn test_contains() {
        fn rng(left: Option<i64>, right: Option<i64>) -> I64Range {
            I64Range { left, right }
        }

        // finite range: inclusive on the left, exclusive on the right
        let a = rng(Some(2), Some(5));
        assert!(a.contains(2));
        assert!(a.contains(4));
        assert!(!a.contains(5));
        assert!(!a.contains(6));

        // left-infinite range
        let a = rng(None, Some(-5));
        assert!(a.contains(-100));
        assert!(!a.contains(0));
        assert!(!a.contains(6));

        // right-infinite range
        let a = rng(Some(-10), None);
        assert!(a.contains(-10));
        assert!(a.contains(0));
        assert!(a.contains(1000));
        assert!(!a.contains(-20));

        // invalid (reversed) ranges contain no points
        let a = rng(Some(0), Some(-5));
        assert!(!a.contains(-4));
        assert!(!a.contains(1));
        assert!(!a.contains(-6));
    }

    #[test]
    fn test_duration() {
        fn rng(left: Option<i64>, right: Option<i64>) -> I64Range {
            I64Range { left, right }
        }

        // finite ranges have a finite width
        assert_eq!(rng(Some(3), Some(7)).duration(), Some(4));
        assert_eq!(rng(Some(-3), Some(7)).duration(), Some(10));
        // an infinite bound on either side means no finite duration
        assert_eq!(rng(None, Some(7)).duration(), None);
        assert_eq!(rng(Some(3), None).duration(), None);
        // invalid (reversed) ranges return None durations as well
        assert_eq!(rng(Some(3), Some(0)).duration(), None);
    }

    #[test]
    fn test_checks() {
        fn rng(left: Option<i64>, right: Option<i64>) -> I64Range {
            I64Range { left, right }
        }

        // ordinary finite range
        let a = rng(Some(2), Some(5));
        assert!(a.is_valid());
        assert!(!a.is_singleton());

        // half-infinite ranges are valid and never singletons
        let a = rng(None, Some(-5));
        assert!(a.is_valid());
        assert!(!a.is_singleton());
        let a = rng(Some(-10), None);
        assert!(a.is_valid());
        assert!(!a.is_singleton());

        // coincident finite bounds: the singleton case, with zero duration
        let a = rng(Some(2), Some(2));
        assert!(a.is_valid());
        assert!(a.is_singleton());
        assert_eq!(a.duration(), Some(0));

        // reversed bounds: invalid and not a singleton
        let a = rng(Some(0), Some(-10));
        assert!(!a.is_valid());
        assert!(!a.is_singleton());
    }
}


================================================
FILE: crates/counter-agg/src/tests.rs
================================================
// TODO Move to ../tests/lib.rs

use crate::range::I64Range;
use crate::*;
use approx::assert_relative_eq;
/// Converts a time value in seconds back to microseconds (the inverse of
/// `to_seconds` in the crate under test).
fn to_micro(t: f64) -> f64 {
    const MICROS_PER_SECOND: f64 = 1_000_000.0;
    t * MICROS_PER_SECOND
}
// Do proper numerical comparisons on the values where that matters; use exact
// comparison where it should be exact.
/// Asserts that two summaries agree: boundary points and integer counters
/// exactly, accumulated floating-point statistics within relative tolerance.
#[track_caller]
pub fn assert_close_enough(p1: &MetricSummary, p2: &MetricSummary) {
    assert_eq!(p1.first, p2.first, "first");
    assert_eq!(p1.second, p2.second, "second");
    assert_eq!(p1.penultimate, p2.penultimate, "penultimate");
    assert_eq!(p1.last, p2.last, "last");
    assert_eq!(p1.num_changes, p2.num_changes, "num_changes");
    assert_eq!(p1.num_resets, p2.num_resets, "num_resets");
    assert_eq!(p1.stats.n, p2.stats.n, "n");
    assert_relative_eq!(p1.stats.sx, p2.stats.sx);
    assert_relative_eq!(p1.stats.sx2, p2.stats.sx2);
    assert_relative_eq!(p1.stats.sy, p2.stats.sy);
    assert_relative_eq!(p1.stats.sy2, p2.stats.sy2);
    assert_relative_eq!(p1.stats.sxy, p2.stats.sxy);
}

#[test]
fn create() {
    // A freshly-built summary reports its single point in every slot.
    let pt = TSPoint { ts: 0, val: 0.0 };
    let summary = CounterSummaryBuilder::new(&pt, None).build();
    assert_eq!(summary.first, pt);
    assert_eq!(summary.second, pt);
    assert_eq!(summary.penultimate, pt);
    assert_eq!(summary.last, pt);
    assert_eq!(summary.reset_sum, 0.0);
}
#[test]
fn adding_point() {
    let start = TSPoint { ts: 0, val: 0.0 };
    let mut builder = CounterSummaryBuilder::new(&start, None);
    let next = TSPoint { ts: 5, val: 10.0 };

    builder.add_point(&next).unwrap();

    // With two points: first/penultimate stay at the start, second/last move.
    let summary = builder.build();
    assert_eq!(summary.first, start);
    assert_eq!(summary.second, next);
    assert_eq!(summary.penultimate, start);
    assert_eq!(summary.last, next);
    assert_eq!(summary.reset_sum, 0.0);
    assert_eq!(summary.num_resets, 0);
    assert_eq!(summary.num_changes, 1);
}

#[test]
fn adding_points_to_counter() {
    let start = TSPoint { ts: 0, val: 0.0 };
    let mut builder = CounterSummaryBuilder::new(&start, None);

    // Final point drops from 50 to 10, triggering one counter reset.
    for (ts, val) in [(5, 10.0), (10, 20.0), (15, 20.0), (20, 50.0), (25, 10.0)] {
        builder.add_point(&TSPoint { ts, val }).unwrap();
    }

    let summary = builder.build();
    assert_eq!(summary.first, start);
    assert_eq!(summary.second, TSPoint { ts: 5, val: 10.0 });
    assert_eq!(summary.penultimate, TSPoint { ts: 20, val: 50.0 });
    assert_eq!(summary.last, TSPoint { ts: 25, val: 10.0 });
    assert_relative_eq!(summary.reset_sum, 50.0);
    assert_eq!(summary.num_resets, 1);
    assert_eq!(summary.num_changes, 4);
    assert_eq!(summary.stats.count(), 6);
    assert_relative_eq!(summary.stats.sum().unwrap().x, 0.000075);
    // Non-obvious: sum().y is the sum of reset-adjusted values, so the final
    // 10.0 counts as 60.0 (10 plus the 50 folded in by the reset).
    assert_relative_eq!(
        summary.stats.sum().unwrap().y,
        0.0 + 10.0 + 20.0 + 20.0 + 50.0 + 60.0
    );
}

#[test]
fn adding_out_of_order_counter() {
    let mut builder = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 0.0 }, None);
    builder.add_point(&TSPoint { ts: 5, val: 10.0 }).unwrap();

    // A point earlier than the latest timestamp must be rejected.
    let err = builder.add_point(&TSPoint { ts: 2, val: 9.0 }).unwrap_err();
    assert_eq!(err, CounterError::OrderError);
}

#[test]
fn test_counter_delta() {
    let mut builder = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 10.0 }, None);

    // a lone point has no delta
    assert_relative_eq!(builder.clone().build().delta(), 0.0);

    // a simple increase
    builder.add_point(&TSPoint { ts: 10, val: 20.0 }).unwrap();
    assert_relative_eq!(builder.clone().build().delta(), 10.0);

    // a reset folds the lost 20 back in: 10 + 20 - 10 = 20
    builder.add_point(&TSPoint { ts: 20, val: 10.0 }).unwrap();
    assert_relative_eq!(builder.clone().build().delta(), 20.0);
}

#[test]
fn test_combine() {
    // Accumulate everything into one summary...
    let mut whole = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 0.0 }, None);
    for (ts, val) in [
        (5, 10.0),
        (10, 20.0),
        (15, 30.0),
        (20, 50.0),
        (25, 10.0),
        (30, 40.0),
    ] {
        whole.add_point(&TSPoint { ts, val }).unwrap();
    }

    // ...and the same data split across two disjoint halves.
    let mut part1 = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 0.0 }, None);
    for (ts, val) in [(5, 10.0), (10, 20.0)] {
        part1.add_point(&TSPoint { ts, val }).unwrap();
    }
    let mut part2 = CounterSummaryBuilder::new(&TSPoint { ts: 15, val: 30.0 }, None);
    for (ts, val) in [(20, 50.0), (25, 10.0), (30, 40.0)] {
        part2.add_point(&TSPoint { ts, val }).unwrap();
    }

    // Combining the halves must match the monolithic summary.
    let mut combined = part1.clone();
    combined.combine(&part2.clone().build()).unwrap();
    assert_close_enough(&whole.build(), &combined.build());

    // Combining in the wrong time order must fail.
    assert_eq!(
        part2.combine(&part1.build()).unwrap_err(),
        CounterError::OrderError
    );
}

#[test]
fn test_combine_with_small_summary() {
    let p1 = TSPoint { ts: 0, val: 50.0 };
    let p2 = TSPoint { ts: 25, val: 10.0 };

    // Expected summary built from both points directly.
    let mut expected = CounterSummaryBuilder::new(&p1, None);
    expected.add_point(&p2).unwrap();

    // Two single-point summaries; this also exercises a counter reset
    // that falls exactly on the combine boundary (50 -> 10).
    let part1 = CounterSummaryBuilder::new(&p1, None);
    let part2 = CounterSummaryBuilder::new(&p2, None);

    let mut combined = part1.clone();
    combined.combine(&part2.clone().build()).unwrap();
    assert_close_enough(&expected.build(), &combined.build());

    // Combining in the wrong temporal direction is rejected.
    combined = part2;
    assert_eq!(
        combined.combine(&part1.build()).unwrap_err(),
        CounterError::OrderError
    );
}
#[test]
fn test_multiple_resets() {
    let start = TSPoint { ts: 0, val: 0.0 };
    let points = [
        TSPoint { ts: 5, val: 10.0 },
        TSPoint { ts: 10, val: 20.0 },
        TSPoint { ts: 15, val: 10.0 }, // reset (20 -> 10)
        TSPoint { ts: 20, val: 40.0 },
        TSPoint { ts: 25, val: 20.0 }, // reset (40 -> 20)
        TSPoint { ts: 30, val: 40.0 },
    ];

    let mut builder = CounterSummaryBuilder::new(&start, None);
    for pt in &points {
        builder.add_point(pt).unwrap();
    }

    let summary = builder.build();
    assert_eq!(summary.first, start);
    assert_eq!(summary.second, points[0]);
    assert_eq!(summary.penultimate, points[4]);
    assert_eq!(summary.last, points[5]);
    assert_relative_eq!(summary.reset_sum, 60.0);
    assert_eq!(summary.num_resets, 2);
    assert_eq!(summary.num_changes, 6);
    assert_eq!(summary.stats.count(), 7);
    assert_relative_eq!(summary.stats.sum().unwrap().x, 0.000105);
    // Non obvious one here: sum().y should be the sum of all values
    // including the resets accumulated at the time each point was added.
    assert_relative_eq!(
        summary.stats.sum().unwrap().y,
        0.0 + 10.0 + 20.0 + 30.0 + 60.0 + 80.0 + 100.0
    );

    // The same data split in two and recombined must match the whole.
    let mut part1 = CounterSummaryBuilder::new(&start, None);
    for pt in &points[..2] {
        part1.add_point(pt).unwrap();
    }

    let mut part2 = CounterSummaryBuilder::new(&points[2], None);
    for pt in &points[3..] {
        part2.add_point(pt).unwrap();
    }

    let mut combined = part1.clone();
    combined.combine(&part2.clone().build()).unwrap();
    assert_close_enough(&summary, &combined.build());

    // Combining in the wrong temporal direction is rejected.
    assert_eq!(
        part2.combine(&part1.build()).unwrap_err(),
        CounterError::OrderError
    );
}

#[test]
fn test_extraction_single_point() {
    // A summary over a single point has zero deltas, no rates, and no
    // observed changes or resets.
    let only = TSPoint { ts: 20, val: 10.0 };
    let summary = CounterSummaryBuilder::new(&only, None).build();

    assert_relative_eq!(summary.delta(), 0.0);
    assert_relative_eq!(summary.idelta_left(), 0.0);
    assert_relative_eq!(summary.idelta_right(), 0.0);
    assert_eq!(summary.rate(), None);
    assert_eq!(summary.irate_left(), None);
    assert_eq!(summary.irate_right(), None);
    assert_eq!(summary.num_changes, 0);
    assert_eq!(summary.num_resets, 0);
}

#[test]
fn test_extraction_simple() {
    let mut builder = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 0.0 }, None);
    for pt in &[
        TSPoint { ts: 5, val: 5.0 },
        TSPoint { ts: 10, val: 20.0 },
        TSPoint { ts: 15, val: 30.0 },
    ] {
        builder.add_point(pt).unwrap();
    }

    let summary = builder.build();
    // Total change of 30 over 15 ticks -> an overall rate of 2 per tick.
    assert_relative_eq!(summary.delta(), 30.0);
    assert_relative_eq!(summary.rate().unwrap(), to_micro(2.0));
    // Instantaneous deltas/rates at the two ends of the series.
    assert_relative_eq!(summary.idelta_left(), 5.0);
    assert_relative_eq!(summary.idelta_right(), 10.0);
    assert_relative_eq!(summary.irate_left().unwrap(), to_micro(1.0));
    assert_relative_eq!(summary.irate_right().unwrap(), to_micro(2.0));
    assert_eq!(summary.num_changes, 3);
    assert_eq!(summary.num_resets, 0);
}

#[test]
fn test_extraction_with_resets() {
    let mut builder = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 10.0 }, None);
    for pt in &[
        TSPoint { ts: 5, val: 5.0 },   // reset (10 -> 5)
        TSPoint { ts: 10, val: 30.0 },
        TSPoint { ts: 15, val: 15.0 }, // reset (30 -> 15)
    ] {
        builder.add_point(pt).unwrap();
    }

    let summary = builder.build();
    // Deltas/rates are reset-adjusted: effective values 10 -> 15 -> 40 -> 55.
    assert_relative_eq!(summary.delta(), 45.0);
    assert_relative_eq!(summary.rate().unwrap(), to_micro(3.0));
    assert_relative_eq!(summary.idelta_left(), 5.0);
    assert_relative_eq!(summary.idelta_right(), 15.0);
    assert_relative_eq!(summary.irate_left().unwrap(), to_micro(1.0));
    assert_relative_eq!(summary.irate_right().unwrap(), to_micro(3.0));
    assert_eq!(summary.num_changes, 3);
    assert_eq!(summary.num_resets, 2);
}

#[test]
fn test_bounds() {
    // Helper: builder for a single point with a finite [left, right) bound.
    fn bounded(ts: i64, val: f64, left: i64, right: i64) -> CounterSummaryBuilder {
        CounterSummaryBuilder::new(
            &TSPoint { ts, val },
            Some(I64Range {
                left: Some(left),
                right: Some(right),
            }),
        )
    }
    // Helper: the range we expect after a combine.
    fn range(left: i64, right: i64) -> I64Range {
        I64Range {
            left: Some(left),
            right: Some(right),
        }
    }

    // Having no bound at all is fine.
    let unbounded = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 10.0 }, None);
    assert!(unbounded.bounds_valid());

    // A bound that excludes the first point is not.
    assert!(!bounded(0, 10.0, 5, 10).bounds_valid());

    // The left bound is inclusive.
    let mut summary = bounded(0, 10.0, 0, 10);
    assert!(summary.bounds_valid());
    summary.add_point(&TSPoint { ts: 5, val: 5.0 }).unwrap();
    assert!(summary.bounds_valid());

    // Adding points past our bounds is okay, but the bounds will be invalid
    // when we check; for efficiency this check happens in the final function,
    // not on every point addition. Note that the right bound is exclusive.
    summary.add_point(&TSPoint { ts: 10, val: 10.0 }).unwrap();
    assert!(!summary.bounds_valid());

    // Slightly weird case here... two invalid bounds can produce a validly
    // bounded object once the bounds are combined; a bit weird, but this
    // seems like the correct behavior.
    summary.combine(&bounded(15, 10.0, 20, 30).build()).unwrap();
    assert!(summary.bounds_valid());
    assert_eq!(summary.clone().build().bounds.unwrap(), range(0, 30));

    // Two of the same valid bounds remain the same and valid.
    summary.combine(&bounded(20, 10.0, 0, 30).build()).unwrap();
    assert!(summary.bounds_valid());
    assert_eq!(summary.clone().build().bounds.unwrap(), range(0, 30));

    // Combining with unbounded ones is fine, and the bounds survive.
    let no_bounds = CounterSummaryBuilder::new(&TSPoint { ts: 25, val: 10.0 }, None);
    summary.combine(&no_bounds.build()).unwrap();
    assert!(summary.bounds_valid());
    assert_eq!(summary.clone().build().bounds.unwrap(), range(0, 30));

    // Combined bounds that do not span all points are still invalid.
    summary.combine(&bounded(35, 10.0, 0, 32).build()).unwrap();
    assert!(!summary.bounds_valid());
    assert_eq!(summary.build().bounds.unwrap(), range(0, 32));

    // Combining unbounded with bounded ones is fine, and the bounds survive.
    let mut summary = CounterSummaryBuilder::new(&TSPoint { ts: 0, val: 10.0 }, None);
    summary.combine(&bounded(25, 10.0, 0, 30).build()).unwrap();
    assert!(summary.bounds_valid());
    assert_eq!(summary.build().bounds.unwrap(), range(0, 30));
}

#[test]
fn test_prometheus_extrapolation_simple() {
    // Error on lack of bounds: the Prometheus-style accessors require them.
    let summary = CounterSummaryBuilder::new(
        &TSPoint {
            ts: 5000,
            val: 15.0,
        },
        None,
    );
    let summary = summary.build();
    assert_eq!(
        summary.prometheus_delta().unwrap_err(),
        CounterError::BoundsInvalid
    );
    assert_eq!(
        summary.prometheus_rate().unwrap_err(),
        CounterError::BoundsInvalid
    );

    // Error on infinite (half-open) bounds.
    let summary = CounterSummaryBuilder::new(
        &TSPoint {
            ts: 5000,
            val: 15.0,
        },
        Some(I64Range {
            left: None,
            right: Some(21000),
        }),
    )
    .build();
    assert_eq!(
        summary.prometheus_delta().unwrap_err(),
        CounterError::BoundsInvalid
    );
    assert_eq!(
        summary.prometheus_rate().unwrap_err(),
        CounterError::BoundsInvalid
    );

    // Ranges less than 1ms are treated as zero by Prom.
    let mut summary = CounterSummaryBuilder::new(
        &TSPoint { ts: 300, val: 15.0 },
        Some(I64Range {
            left: Some(0),
            right: Some(900),
        }),
    );
    summary.add_point(&TSPoint { ts: 600, val: 20.0 }).unwrap();
    assert_eq!(summary.build().prometheus_rate().unwrap(), None);

    // Ranges should go out an extra 1000 so that we account for the extra
    // duration that prom subtracts (1 ms).
    // (A duplicate of this builder, whose added point was immediately
    // discarded, used to precede this block; that dead code was removed.)
    let mut summary = CounterSummaryBuilder::new(
        &TSPoint {
            ts: 5000,
            val: 15.0,
        },
        Some(I64Range {
            left: Some(0),
            right: Some(21000),
        }),
    );
    // Singletons should return None.
    assert_eq!(summary.clone().build().prometheus_delta().unwrap(), None);
    assert_eq!(summary.clone().build().prometheus_rate().unwrap(), None);

    summary
        .add_point(&TSPoint {
            ts: 10000,
            val: 20.0,
        })
        .unwrap();
    summary
        .add_point(&TSPoint {
            ts: 15000,
            val: 25.0,
        })
        .unwrap();

    let summary = summary.build();
    assert_relative_eq!(summary.delta(), 10.0);
    assert_relative_eq!(summary.rate().unwrap(), to_micro(0.001));
    assert_relative_eq!(summary.prometheus_delta().unwrap().unwrap(), 20.0);
    // Linear cases like this should be equal.
    assert_relative_eq!(
        summary.prometheus_rate().unwrap().unwrap(),
        summary.rate().unwrap()
    );

    // Add a point outside our bounds and make sure we error correctly.
    let mut summary = CounterSummaryBuilder::from(summary);
    summary
        .add_point(&TSPoint {
            ts: 25000,
            val: 35.0,
        })
        .unwrap();
    let summary = summary.build();
    assert_eq!(
        summary.prometheus_delta().unwrap_err(),
        CounterError::BoundsInvalid
    );
    assert_eq!(
        summary.prometheus_rate().unwrap_err(),
        CounterError::BoundsInvalid
    );
}

#[test]
fn test_prometheus_extrapolation_bound_size() {
    let mut summary = CounterSummaryBuilder::new(
        &TSPoint {
            ts: 20000,
            val: 40.0,
        },
        Some(I64Range {
            left: Some(10000),
            right: Some(51000),
        }),
    );
    summary
        .add_point(&TSPoint {
            ts: 30000,
            val: 20.0,
        })
        .unwrap();
    summary
        .add_point(&
Download .txt
gitextract_o1olp053/

├── .cargo/
│   └── config
├── .dockerignore
├── .git-blame-ignore-revs
├── .github/
│   ├── ISSUE_TEMPLATE/
│   │   ├── bug-report.md
│   │   ├── feature-request.md
│   │   ├── feature-stabilization.md
│   │   └── proposed-feature.md
│   └── workflows/
│       ├── add-to-bugs-board.yml
│       ├── ci.yml
│       ├── ci_image_build.yml
│       ├── clippy_rustfmt.yml
│       ├── dependency-updates.yml
│       ├── packaging.yml
│       ├── release.yml
│       └── report_packaging_failures.yml
├── .gitignore
├── Cargo.toml
├── Changelog.md
├── LICENSE
├── NOTICE
├── Readme.md
├── crates/
│   ├── aggregate_builder/
│   │   ├── Cargo.toml
│   │   ├── Readme.md
│   │   └── src/
│   │       └── lib.rs
│   ├── asap/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── fft.rs
│   │       └── lib.rs
│   ├── count-min-sketch/
│   │   ├── Cargo.toml
│   │   ├── src/
│   │   │   └── lib.rs
│   │   └── tests/
│   │       └── lib.rs
│   ├── counter-agg/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── lib.rs
│   │       ├── range.rs
│   │       └── tests.rs
│   ├── encodings/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── flat_serialize/
│   │   ├── Readme.md
│   │   ├── example_generated.rs
│   │   ├── flat_serialize/
│   │   │   ├── Cargo.toml
│   │   │   └── src/
│   │   │       └── lib.rs
│   │   └── flat_serialize_macro/
│   │       ├── Cargo.toml
│   │       └── src/
│   │           ├── lib.rs
│   │           └── parser.rs
│   ├── hyperloglogplusplus/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── dense.rs
│   │       ├── hyperloglog_data.rs
│   │       ├── lib.rs
│   │       ├── registers.rs
│   │       ├── sparse/
│   │       │   └── varint.rs
│   │       └── sparse.rs
│   ├── scripting-utilities/
│   │   ├── Readme.md
│   │   ├── control_file_reader/
│   │   │   ├── Cargo.toml
│   │   │   └── src/
│   │   │       └── lib.rs
│   │   └── postgres_connection_configuration/
│   │       ├── Cargo.toml
│   │       └── src/
│   │           └── lib.rs
│   ├── stats-agg/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       ├── lib.rs
│   │       ├── stats1d.rs
│   │       ├── stats2d/
│   │       │   └── stats2d_flat_serialize.rs
│   │       └── stats2d.rs
│   ├── t-digest/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── t-digest-lib/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── time-weighted-average/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   ├── tspoint/
│   │   ├── Cargo.toml
│   │   └── src/
│   │       └── lib.rs
│   └── udd-sketch/
│       ├── Cargo.toml
│       └── src/
│           └── lib.rs
├── docker/
│   ├── README.md
│   └── ci/
│       ├── Dockerfile
│       └── setup.sh
├── docs/
│   ├── README.md
│   ├── asap.md
│   ├── client.md
│   ├── counter_agg.md
│   ├── examples/
│   │   ├── tdigest.c
│   │   └── tdigest.py
│   ├── gauge_agg.md
│   ├── hyperloglog.md
│   ├── lttb.md
│   ├── ordered-aggregates.md
│   ├── percentile_approximation.md
│   ├── release.md
│   ├── rolling_average_api_working.md
│   ├── state_agg.md
│   ├── stats_agg.md
│   ├── tdigest.md
│   ├── template.md
│   ├── test_caggs.md
│   ├── test_candlestick_agg.md
│   ├── time_weighted_average.md
│   ├── timeseries.md
│   ├── timeseries_pipeline_elements.md
│   ├── two-step_aggregation.md
│   └── uddsketch.md
├── extension/
│   ├── .gitignore
│   ├── Cargo.toml
│   ├── src/
│   │   ├── accessors/
│   │   │   └── tests.rs
│   │   ├── accessors.rs
│   │   ├── aggregate_builder_tests.rs
│   │   ├── aggregate_utils.rs
│   │   ├── asap.rs
│   │   ├── bin/
│   │   │   └── pgrx_embed.rs
│   │   ├── candlestick.rs
│   │   ├── counter_agg/
│   │   │   └── accessors.rs
│   │   ├── counter_agg.rs
│   │   ├── countminsketch.rs
│   │   ├── datum_utils.rs
│   │   ├── duration.rs
│   │   ├── frequency.rs
│   │   ├── gauge_agg.rs
│   │   ├── heartbeat_agg/
│   │   │   └── accessors.rs
│   │   ├── heartbeat_agg.rs
│   │   ├── hyperloglog.rs
│   │   ├── lib.rs
│   │   ├── lttb.rs
│   │   ├── nmost/
│   │   │   ├── max_by_float.rs
│   │   │   ├── max_by_int.rs
│   │   │   ├── max_by_time.rs
│   │   │   ├── max_float.rs
│   │   │   ├── max_int.rs
│   │   │   ├── max_time.rs
│   │   │   ├── min_by_float.rs
│   │   │   ├── min_by_int.rs
│   │   │   ├── min_by_time.rs
│   │   │   ├── min_float.rs
│   │   │   ├── min_int.rs
│   │   │   └── min_time.rs
│   │   ├── nmost.rs
│   │   ├── palloc.rs
│   │   ├── pg_any_element.rs
│   │   ├── range.rs
│   │   ├── raw.rs
│   │   ├── saturation.rs
│   │   ├── serialization/
│   │   │   ├── collations.rs
│   │   │   ├── functions.rs
│   │   │   └── types.rs
│   │   ├── serialization.rs
│   │   ├── stabilization_info.rs
│   │   ├── stabilization_tests.rs
│   │   ├── state_aggregate/
│   │   │   ├── accessors.rs
│   │   │   └── rollup.rs
│   │   ├── state_aggregate.rs
│   │   ├── stats_agg.rs
│   │   ├── tdigest.rs
│   │   ├── time_vector/
│   │   │   ├── iter.rs
│   │   │   ├── pipeline/
│   │   │   │   ├── aggregation.rs
│   │   │   │   ├── arithmetic.rs
│   │   │   │   ├── delta.rs
│   │   │   │   ├── expansion.rs
│   │   │   │   ├── fill_to.rs
│   │   │   │   ├── filter.rs
│   │   │   │   ├── lambda/
│   │   │   │   │   ├── executor.rs
│   │   │   │   │   ├── lambda_expr.pest
│   │   │   │   │   └── parser.rs
│   │   │   │   ├── lambda.rs
│   │   │   │   ├── map.rs
│   │   │   │   └── sort.rs
│   │   │   └── pipeline.rs
│   │   ├── time_vector.rs
│   │   ├── time_weighted_average/
│   │   │   └── accessors.rs
│   │   ├── time_weighted_average.rs
│   │   ├── type_builder.rs
│   │   ├── uddsketch.rs
│   │   └── utilities.rs
│   └── timescaledb_toolkit.control
├── tests/
│   └── update/
│       ├── candlestick.md
│       ├── heartbeat.md
│       ├── original_update_tests.md
│       ├── state_agg.md
│       ├── time-vector.md
│       └── time-weighted-average.md
└── tools/
    ├── build
    ├── dependencies.sh
    ├── install-timescaledb
    ├── post-install/
    │   ├── Cargo.toml
    │   └── src/
    │       ├── main.rs
    │       └── update_script.rs
    ├── release
    ├── sql-doctester/
    │   ├── Cargo.toml
    │   ├── Readme.md
    │   └── src/
    │       ├── main.rs
    │       ├── parser.rs
    │       ├── runner.rs
    │       └── startup.sql
    ├── testbin
    └── update-tester/
        ├── Cargo.toml
        ├── Readme.md
        └── src/
            ├── installer.rs
            ├── main.rs
            ├── parser.rs
            ├── testrunner/
            │   └── stabilization.rs
            └── testrunner.rs
Download .txt
Showing preview only (215K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (2504 symbols across 107 files)

FILE: crates/aggregate_builder/src/lib.rs
  function aggregate (line 19) | pub fn aggregate(_attr: TokenStream, item: TokenStream) -> TokenStream {
  type Aggregate (line 34) | struct Aggregate {
  type AggregateItem (line 50) | enum AggregateItem {
  type AggregateTy (line 56) | struct AggregateTy {
  type AggregateParallelSafe (line 61) | struct AggregateParallelSafe {
  type AggregateFn (line 65) | struct AggregateFn {
    method transition_fn_tokens (line 592) | fn transition_fn_tokens(
    method final_fn_tokens (line 705) | fn final_fn_tokens(
    method serialize_fn_tokens (line 762) | fn serialize_fn_tokens(
    method deserialize_fn_tokens (line 821) | fn deserialize_fn_tokens(
    method combine_fn_tokens (line 879) | fn combine_fn_tokens(
    method outer_ident (line 958) | fn outer_ident(&self, aggregate_name: &syn::Ident) -> syn::Ident {
    method sql_args (line 966) | fn sql_args(&self) -> impl Iterator<Item = (Option<&syn::Ident>, Strin...
  type AggregateArg (line 76) | struct AggregateArg {
  method parse (line 99) | fn parse(input: ParseStream) -> syn::Result<Self> {
  method parse (line 255) | fn parse(input: ParseStream) -> syn::Result<Self> {
  method parse (line 272) | fn parse(input: ParseStream) -> syn::Result<Self> {
  method parse (line 283) | fn parse(input: ParseStream) -> syn::Result<Self> {
  function is_fcinfo (line 304) | fn is_fcinfo(arg: &AggregateArg) -> bool {
  method parse (line 316) | fn parse(input: ParseStream) -> syn::Result<Self> {
  method parse (line 368) | fn parse(input: ParseStream) -> syn::Result<Self> {
  function take_attr (line 386) | fn take_attr(attrs: &mut Vec<syn::Attribute>, path: &syn::Path) -> Optio...
  function expand (line 401) | fn expand(agg: Aggregate) -> TokenStream2 {
  function arg_ident (line 977) | fn arg_ident(arg: &AggregateArg) -> syn::Pat {
  function make_mod_counters (line 981) | fn make_mod_counters() -> TokenStream2 {
  function ret_type (line 1018) | fn ret_type(ret: &syn::ReturnType) -> Cow<'_, syn::Type> {
  function state_type_check_tokens (line 1025) | fn state_type_check_tokens(ty: &syn::Type, optional: Option<()>) -> Toke...
  function refstate_type_check_tokens (line 1032) | fn refstate_type_check_tokens(ty: &syn::Type, optional: Option<()>) -> T...
  function bytea_type_check_tokens (line 1053) | fn bytea_type_check_tokens(ty: &syn::Type) -> TokenStream2 {
  function type_check_tokens (line 1057) | fn type_check_tokens(user_ty: &syn::Type, expected_type: syn::Type) -> T...

FILE: crates/asap/src/fft.rs
  function transform (line 35) | pub fn transform(real: &mut [f64], imag: &mut [f64]) {
  function inverse_transform (line 53) | pub fn inverse_transform(real: &mut [f64], imag: &mut [f64]) {
  function transform_radix2 (line 61) | fn transform_radix2(real: &mut [f64], imag: &mut [f64]) {
  function transform_bluestein (line 131) | fn transform_bluestein(real: &mut [f64], imag: &mut [f64]) {
  function convolve_complex (line 204) | fn convolve_complex(

FILE: crates/asap/src/lib.rs
  function asap_smooth (line 29) | pub fn asap_smooth(data: &[f64], resolution: u32) -> Vec<f64> {
  function binary_search (line 90) | fn binary_search(
  function sma (line 122) | fn sma(data: &[f64], range: u32, slide: u32) -> Vec<f64> {
  function mean (line 145) | fn mean(values: &[f64]) -> f64 {
  function std (line 149) | fn std(values: &[f64]) -> f64 {
  function new (line 157) | fn new(values: &'a [f64], max_lag: u32) -> Acf<'a> {
  function calculate (line 168) | fn calculate(&mut self) {
  function find_peaks (line 194) | fn find_peaks(&mut self) -> Vec<u32> {
  type Metrics (line 235) | struct Metrics<'a> {
  function new (line 242) | fn new(values: &[f64]) -> Metrics<'_> {
  function kurtosis (line 250) | fn kurtosis(&self) -> f64 {
  function roughness (line 262) | fn roughness(&self) -> f64 {
  function diffs (line 266) | fn diffs(&self) -> Vec<f64> {
  type Acf (line 275) | struct Acf<'a> {
  function simple_sma_test (line 287) | fn simple_sma_test() {
  function sma_slide_test (line 295) | fn sma_slide_test() {
  function sma_slide_unaliged_test (line 303) | fn sma_slide_unaliged_test() {
  function sma_downsample_test (line 311) | fn sma_downsample_test() {
  function test_roughness_and_kurtosis (line 319) | fn test_roughness_and_kurtosis() {
  function test_smoothing (line 348) | fn test_smoothing() {

FILE: crates/count-min-sketch/src/lib.rs
  type CountMinHashFn (line 20) | pub struct CountMinHashFn {
    method with_key (line 28) | pub fn with_key(key: u64) -> Self {
    method hash_into_buckets (line 37) | pub fn hash_into_buckets<T: Hash>(&self, item: &T, nbuckets: usize) ->...
    method key (line 46) | pub(crate) fn key(&self) -> u64 {
  constant SEED (line 24) | const SEED: u64 = 0x517cc1b727220a95;
  type CountMinSketch (line 64) | pub struct CountMinSketch {
    method new (line 77) | pub fn new(
    method with_dims_and_hashfn_keys (line 96) | pub fn with_dims_and_hashfn_keys(width: usize, depth: usize, keys: Vec...
    method with_dim (line 110) | pub fn with_dim(width: usize, depth: usize) -> Self {
    method with_prob (line 130) | pub fn with_prob(epsilon: f64, delta: f64) -> Self {
    method width (line 139) | pub fn width(&self) -> usize {
    method depth (line 144) | pub fn depth(&self) -> usize {
    method hash_keys (line 150) | pub fn hash_keys(&self) -> Vec<u64> {
    method counters (line 158) | pub fn counters(&self) -> &Vec<Vec<i64>> {
    method estimate (line 164) | pub fn estimate<T: Hash>(&self, item: T) -> i64 {
    method get_bucket_indices (line 182) | pub fn get_bucket_indices<T: Hash>(&self, item: T) -> Vec<usize> {
    method add_value (line 190) | pub fn add_value<T: Hash>(&mut self, item: T) {
    method subtract_value (line 198) | pub fn subtract_value<T: Hash>(&mut self, item: T) {
    method combine (line 209) | pub fn combine(&mut self, other: CountMinSketch) {
    method fmt (line 222) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {

FILE: crates/count-min-sketch/tests/lib.rs
  function empty_sketch (line 4) | fn empty_sketch() {
  function add_once (line 10) | fn add_once() {
  function subtract_is_inverse_of_add (line 17) | fn subtract_is_inverse_of_add() {
  function add_repeated (line 25) | fn add_repeated() {
  function add_repeated_with_collisions (line 34) | fn add_repeated_with_collisions() {

FILE: crates/counter-agg/src/lib.rs
  type CounterError (line 12) | pub enum CounterError {
    method fmt (line 300) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
  type MetricSummary (line 25) | pub struct MetricSummary {
    method new (line 67) | pub fn new(pt: &TSPoint, bounds: Option<range::I64Range>) -> MetricSum...
    method reset (line 83) | fn reset(&mut self, incoming: &TSPoint) {
    method add_point (line 91) | fn add_point(&mut self, incoming: &TSPoint) -> Result<(), CounterError> {
    method single_value (line 117) | fn single_value(&self) -> bool {
    method combine (line 122) | fn combine(&mut self, incoming: &MetricSummary) -> Result<(), CounterE...
    method time_delta (line 159) | pub fn time_delta(&self) -> f64 {
    method delta (line 163) | pub fn delta(&self) -> f64 {
    method rate (line 167) | pub fn rate(&self) -> Option<f64> {
    method idelta_left (line 174) | pub fn idelta_left(&self) -> f64 {
    method idelta_right (line 183) | pub fn idelta_right(&self) -> f64 {
    method irate_left (line 192) | pub fn irate_left(&self) -> Option<f64> {
    method irate_right (line 200) | pub fn irate_right(&self) -> Option<f64> {
    method bounds_valid (line 208) | pub fn bounds_valid(&self) -> bool {
    method bounds_extend (line 215) | fn bounds_extend(&mut self, in_bounds: Option<range::I64Range>) {
    method prometheus_delta (line 228) | pub fn prometheus_delta(&self) -> Result<Option<f64>, CounterError> {
    method prometheus_rate (line 284) | pub fn prometheus_rate(&self) -> Result<Option<f64>, CounterError> {
  function ts_to_xy (line 52) | fn ts_to_xy(pt: TSPoint) -> XYPair<f64> {
  function to_seconds (line 59) | fn to_seconds(t: f64) -> f64 {
  type GaugeSummaryBuilder (line 312) | pub struct GaugeSummaryBuilder(MetricSummary);
    method new (line 315) | pub fn new(pt: &TSPoint, bounds: Option<range::I64Range>) -> Self {
    method add_point (line 320) | pub fn add_point(&mut self, incoming: &TSPoint) -> Result<(), CounterE...
    method combine (line 325) | pub fn combine(&mut self, incoming: &MetricSummary) -> Result<(), Coun...
    method set_bounds (line 329) | pub fn set_bounds(&mut self, bounds: Option<range::I64Range>) {
    method build (line 333) | pub fn build(self) -> MetricSummary {
    method first (line 337) | pub fn first(&self) -> &TSPoint {
    method bounds_valid (line 342) | pub fn bounds_valid(&self) -> bool {
    method from (line 348) | fn from(summary: MetricSummary) -> Self {
  type CounterSummaryBuilder (line 354) | pub struct CounterSummaryBuilder(MetricSummary);
    method new (line 357) | pub fn new(pt: &TSPoint, bounds: Option<range::I64Range>) -> Self {
    method add_point (line 362) | pub fn add_point(&mut self, incoming: &TSPoint) -> Result<(), CounterE...
    method combine (line 368) | pub fn combine(&mut self, incoming: &MetricSummary) -> Result<(), Coun...
    method set_bounds (line 373) | pub fn set_bounds(&mut self, bounds: Option<range::I64Range>) {
    method build (line 377) | pub fn build(self) -> MetricSummary {
    method first (line 381) | pub fn first(&self) -> &TSPoint {
    method bounds_valid (line 386) | pub fn bounds_valid(&self) -> bool {
    method from (line 392) | fn from(summary: MetricSummary) -> Self {

FILE: crates/counter-agg/src/range.rs
  type I64Range (line 10) | pub struct I64Range {
    method has_infinite (line 16) | pub fn has_infinite(&self) -> bool {
    method is_valid (line 26) | fn is_valid(&self) -> bool {
    method is_singleton (line 33) | pub fn is_singleton(&self) -> bool {
    method extend (line 40) | pub fn extend(&mut self, other: &Self) {
    method contains (line 54) | pub fn contains(&self, pt: i64) -> bool {
    method duration (line 66) | pub fn duration(&self) -> Option<i64> {
  function test_extend (line 78) | fn test_extend() {
  function test_contains (line 243) | fn test_contains() {
  function test_duration (line 281) | fn test_duration() {
  function test_checks (line 311) | fn test_checks() {

FILE: crates/counter-agg/src/tests.rs
  function to_micro (line 6) | fn to_micro(t: f64) -> f64 {
  function assert_close_enough (line 11) | pub fn assert_close_enough(p1: &MetricSummary, p2: &MetricSummary) {
  function create (line 27) | fn create() {
  function adding_point (line 37) | fn adding_point() {
  function adding_points_to_counter (line 54) | fn adding_points_to_counter() {
  function adding_out_of_order_counter (line 82) | fn adding_out_of_order_counter() {
  function test_counter_delta (line 94) | fn test_counter_delta() {
  function test_combine (line 111) | fn test_combine() {
  function test_combine_with_small_summary (line 141) | fn test_combine_with_small_summary() {
  function test_multiple_resets (line 161) | fn test_multiple_resets() {
  function test_extraction_single_point (line 209) | fn test_extraction_single_point() {
  function test_extraction_simple (line 223) | fn test_extraction_simple() {
  function test_extraction_with_resets (line 241) | fn test_extraction_with_resets() {
  function test_bounds (line 259) | fn test_bounds() {
  function test_prometheus_extrapolation_simple (line 376) | fn test_prometheus_extrapolation_simple() {
  function test_prometheus_extrapolation_bound_size (line 508) | fn test_prometheus_extrapolation_bound_size() {

FILE: crates/encodings/src/lib.rs
  function i64_decoder (line 4) | pub fn i64_decoder() -> impl FnMut(i64) -> i64 {
  function u64_decoder (line 13) | pub fn u64_decoder() -> impl FnMut(u64) -> u64 {
  function i64_encoder (line 23) | pub fn i64_encoder() -> impl FnMut(i64) -> i64 {
  function u64_encoder (line 32) | pub fn u64_encoder() -> impl FnMut(u64) -> u64 {
  function quick_test_roundtrip_u64 (line 48) | fn quick_test_roundtrip_u64(values: Vec<u64>) -> bool {
  function quick_test_roundtrip_i64 (line 63) | fn quick_test_roundtrip_i64(values: Vec<i64>) -> bool {
  function encode (line 81) | pub fn encode(n: i64) -> u64 {
  function decode (line 93) | pub fn decode(n: u64) -> i64 {
  function size_vec (line 126) | pub fn size_vec<I: Iterator<Item = u64>>(bytes: &mut Vec<u8>, values: I) {
  function bytes_for_value (line 132) | pub fn bytes_for_value(value: u64) -> u32 {
  type I64Compressor (line 140) | pub struct I64Compressor<F: FnMut(i64) -> i64> {
  function new (line 146) | pub fn new() -> Self {
  method default (line 155) | fn default() -> Self {
  function with (line 161) | pub fn with(encoder: F) -> Self {
  function push (line 168) | pub fn push(&mut self, value: i64) {
  function finish (line 173) | pub fn finish(self) -> Vec<u8> {
  type U64Compressor (line 178) | pub struct U64Compressor<F: FnMut(u64) -> u64> {
  function new (line 184) | pub fn new() -> Self {
  method default (line 193) | fn default() -> Self {
  function with (line 199) | pub fn with(encoder: F) -> Self {
  function push (line 206) | pub fn push(&mut self, value: u64) {
  function finish (line 211) | pub fn finish(self) -> Vec<u8> {
  function is_empty (line 215) | pub fn is_empty(&self) -> bool {
  function compress_i64s_to_vec (line 220) | pub fn compress_i64s_to_vec<I: Iterator<Item = i64>>(bytes: &mut Vec<u8>...
  function compress_u64s_to_vec (line 224) | pub fn compress_u64s_to_vec<I: Iterator<Item = u64>>(bytes: &mut Vec<u8>...
  function write_to_vec (line 231) | pub fn write_to_vec(out: &mut Vec<u8>, mut value: u64) {
  type Value (line 250) | type Value = u64;
  function i64_decompressor (line 252) | pub fn i64_decompressor(bytes: &[u8]) -> impl Iterator<Item = i64> + '_ {
  function u64_decompressor (line 256) | pub fn u64_decompressor(mut bytes: &[u8]) -> impl Iterator<Item = u64> +...
  function read_from_slice (line 269) | pub fn read_from_slice(bytes: &[u8]) -> (Value, usize) {
  function prefix_length (line 295) | pub fn prefix_length(tag_byte: u8) -> u32 {
  function quick_test_roundtrip_u64 (line 306) | fn quick_test_roundtrip_u64(values: Vec<u64>) -> bool {
  function quick_test_roundtrip_i64 (line 316) | fn quick_test_roundtrip_i64(values: Vec<i64>) -> bool {

FILE: crates/flat_serialize/example_generated.rs
  type Basic (line 4) | pub struct Basic<'input> {
  constant _ (line 12) | const _: () = {
  constant _ (line 68) | const _: () = {
  constant REQUIRED_ALIGNMENT (line 81) | const REQUIRED_ALIGNMENT: usize = {
  constant MAX_PROVIDED_ALIGNMENT (line 106) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
  constant MIN_LEN (line 160) | const MIN_LEN: usize = {
  constant TRIVIAL_COPY (line 170) | const TRIVIAL_COPY: bool = false;
  type SLICE (line 171) | type SLICE = flat_serialize::Iterable<'input, Basic<'input>>;
  function try_ref (line 174) | unsafe fn try_ref(
  function fill_slice (line 285) | unsafe fn fill_slice<'out>(
  function len (line 320) | fn len(&self) -> usize {
  type Optional (line 337) | pub struct Optional {
    constant REQUIRED_ALIGNMENT (line 386) | const REQUIRED_ALIGNMENT: usize = {
    constant MAX_PROVIDED_ALIGNMENT (line 403) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
    constant MIN_LEN (line 449) | const MIN_LEN: usize = {
    constant TRIVIAL_COPY (line 457) | const TRIVIAL_COPY: bool = false;
    type SLICE (line 458) | type SLICE = flat_serialize::Iterable<'a, Optional>;
    method try_ref (line 461) | unsafe fn try_ref(mut input: &[u8]) -> Result<(Self, &[u8]), flat_seri...
    method fill_slice (line 534) | unsafe fn fill_slice<'out>(
    method len (line 562) | fn len(&self) -> usize {
  constant _ (line 343) | const _: () = {
  constant _ (line 377) | const _: () = {
  type Nested (line 579) | pub struct Nested<'a> {
  constant _ (line 584) | const _: () = {
  constant _ (line 608) | const _: () = {
  constant REQUIRED_ALIGNMENT (line 615) | const REQUIRED_ALIGNMENT: usize = {
  constant MAX_PROVIDED_ALIGNMENT (line 628) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
  constant MIN_LEN (line 661) | const MIN_LEN: usize = {
  constant TRIVIAL_COPY (line 668) | const TRIVIAL_COPY: bool = false;
  type SLICE (line 669) | type SLICE = flat_serialize::Iterable<'a, Nested<'a>>;
  function try_ref (line 672) | unsafe fn try_ref(mut input: &'a [u8]) -> Result<(Self, &'a [u8]), flat_...
  function fill_slice (line 718) | unsafe fn fill_slice<'out>(
  function len (line 736) | fn len(&self) -> usize {
  type NestedOptional (line 744) | pub struct NestedOptional {
    constant REQUIRED_ALIGNMENT (line 782) | const REQUIRED_ALIGNMENT: usize = {
    constant MAX_PROVIDED_ALIGNMENT (line 795) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
    constant MIN_LEN (line 834) | const MIN_LEN: usize = {
    constant TRIVIAL_COPY (line 841) | const TRIVIAL_COPY: bool = false;
    type SLICE (line 842) | type SLICE = flat_serialize::Iterable<'a, NestedOptional>;
    method try_ref (line 845) | unsafe fn try_ref(mut input: &[u8]) -> Result<(Self, &[u8]), flat_seri...
    method fill_slice (line 902) | unsafe fn fill_slice<'out>(
    method len (line 923) | fn len(&self) -> usize {
  constant _ (line 749) | const _: () = {
  constant _ (line 775) | const _: () = {
  type NestedSlice (line 935) | pub struct NestedSlice<'b> {
  constant _ (line 940) | const _: () = {
  constant _ (line 966) | const _: () = {
  constant REQUIRED_ALIGNMENT (line 973) | const REQUIRED_ALIGNMENT: usize = {
  constant MAX_PROVIDED_ALIGNMENT (line 986) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
  constant MIN_LEN (line 1019) | const MIN_LEN: usize = {
  constant TRIVIAL_COPY (line 1026) | const TRIVIAL_COPY: bool = false;
  type SLICE (line 1027) | type SLICE = flat_serialize::Iterable<'b, NestedSlice<'b>>;
  function try_ref (line 1030) | unsafe fn try_ref(mut input: &'b [u8]) -> Result<(Self, &'b [u8]), flat_...
  function fill_slice (line 1084) | unsafe fn fill_slice<'out>(
  function len (line 1103) | fn len(&self) -> usize {
  type BasicEnum (line 1111) | pub enum BasicEnum<'input> {
  constant _ (line 1121) | const _: () = {
  constant _ (line 1177) | const _: () = {
  constant _ (line 1184) | const _: () = {
  constant REQUIRED_ALIGNMENT (line 1203) | const REQUIRED_ALIGNMENT: usize = {
  constant MAX_PROVIDED_ALIGNMENT (line 1237) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
  constant MIN_LEN (line 1330) | const MIN_LEN: usize = {
  constant TRIVIAL_COPY (line 1359) | const TRIVIAL_COPY: bool = false;
  type SLICE (line 1360) | type SLICE = flat_serialize::Iterable<'input, BasicEnum<'input>>;
  function try_ref (line 1363) | unsafe fn try_ref(
  function fill_slice (line 1467) | unsafe fn fill_slice<'out>(
  function len (line 1501) | fn len(&self) -> usize {
  type PaddedEnum (line 1516) | pub enum PaddedEnum<'input> {
  constant _ (line 1528) | const _: () = {
  constant _ (line 1604) | const _: () = {
  constant _ (line 1611) | const _: () = {
  constant REQUIRED_ALIGNMENT (line 1634) | const REQUIRED_ALIGNMENT: usize = {
  constant MAX_PROVIDED_ALIGNMENT (line 1676) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
  constant MIN_LEN (line 1785) | const MIN_LEN: usize = {
  constant TRIVIAL_COPY (line 1816) | const TRIVIAL_COPY: bool = false;
  type SLICE (line 1817) | type SLICE = flat_serialize::Iterable<'input, PaddedEnum<'input>>;
  function try_ref (line 1820) | unsafe fn try_ref(
  function fill_slice (line 1955) | unsafe fn fill_slice<'out>(
  function len (line 1999) | fn len(&self) -> usize {

FILE: crates/flat_serialize/flat_serialize/src/lib.rs
  type WrapErr (line 8) | pub enum WrapErr {
  type FlatSerializable (line 25) | pub unsafe trait FlatSerializable<'input>: Sized + 'input {
    constant MIN_LEN (line 26) | const MIN_LEN: usize;
    constant REQUIRED_ALIGNMENT (line 27) | const REQUIRED_ALIGNMENT: usize;
    constant MAX_PROVIDED_ALIGNMENT (line 28) | const MAX_PROVIDED_ALIGNMENT: Option<usize>;
    constant TRIVIAL_COPY (line 29) | const TRIVIAL_COPY: bool = false;
    method try_ref (line 34) | unsafe fn try_ref(input: &'input [u8]) -> Result<(Self, &'input [u8]),...
    method fill_vec (line 35) | fn fill_vec(&self, input: &mut Vec<u8>) {
    method fill_slice (line 54) | unsafe fn fill_slice<'out>(
    method num_bytes (line 58) | fn num_bytes(&self) -> usize;
    method make_owned (line 60) | fn make_owned(&mut self);
    method into_owned (line 61) | fn into_owned(self) -> Self::OWNED;
  constant MIN_LEN (line 131) | const MIN_LEN: usize = { T::MIN_LEN * N };
  constant REQUIRED_ALIGNMENT (line 132) | const REQUIRED_ALIGNMENT: usize = T::REQUIRED_ALIGNMENT;
  constant MAX_PROVIDED_ALIGNMENT (line 133) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = T::MAX_PROVIDED_ALIGNMENT;
  constant TRIVIAL_COPY (line 134) | const TRIVIAL_COPY: bool = T::TRIVIAL_COPY;
  type SLICE (line 136) | type SLICE = Slice<'i, [T; N]>;
  type OWNED (line 137) | type OWNED = [T::OWNED; N];
  function try_ref (line 140) | unsafe fn try_ref(mut input: &'i [u8]) -> Result<(Self, &'i [u8]), WrapE...
  function fill_slice (line 158) | unsafe fn fill_slice<'out>(
  function num_bytes (line 179) | fn num_bytes(&self) -> usize {
  function make_owned (line 183) | fn make_owned(&mut self) {
  function into_owned (line 189) | fn into_owned(self) -> Self::OWNED {
  type Slice (line 203) | pub enum Slice<'input, T: 'input> {
  function iter (line 210) | pub fn iter<'s>(&'s self) -> Iter<'input, 's, T>
  type Item (line 226) | type Item = T;
  type IntoIter (line 228) | type IntoIter = Iter<'input, 'input, T>;
  function into_iter (line 230) | fn into_iter(self) -> Self::IntoIter {
  type Iter (line 239) | pub enum Iter<'input, 'borrow, T: 'input> {
  type Item (line 249) | type Item = T;
  method next (line 251) | fn next(&mut self) -> Option<Self::Item> {
  method nth (line 274) | fn nth(&mut self, n: usize) -> Option<Self::Item> {
  function len (line 295) | pub fn len(&self) -> usize {
  function is_empty (line 303) | pub fn is_empty(&self) -> bool {
  function fmt (line 316) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
  method eq (line 325) | fn eq(&self, other: &Self) -> bool {
  type Unflatten (line 333) | pub struct Unflatten<'input, T: 'input> {
  function from_bytes (line 340) | pub unsafe fn from_bytes(bytes: &'input [u8]) -> Self {
  function len (line 347) | pub fn len(&self) -> usize
  function is_empty (line 358) | pub fn is_empty(&self) -> bool
  function make_owned (line 369) | pub fn make_owned(&mut self)
  function into_vec (line 376) | pub fn into_vec(self) -> Vec<T::OWNED>
  function into_owned (line 387) | pub fn into_owned(self) -> Slice<'static, T::OWNED>
  function as_owned (line 394) | pub fn as_owned(&mut self) -> &mut Vec<T>
  function as_slice (line 414) | pub fn as_slice(&self) -> &[T]
  function slice (line 425) | pub fn slice(&self) -> &'input [T]
  type Item (line 440) | type Item = T;
  method next (line 442) | fn next(&mut self) -> Option<Self::Item> {
  function from (line 453) | fn from(val: &'input [T]) -> Self {
  function from (line 459) | fn from(val: Vec<T>) -> Self {
  method clone (line 468) | fn clone(&self) -> Self {
  function serialize (line 481) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function deserialize (line 499) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  method clone (line 509) | fn clone(&self) -> Self {
  type VariableLen (line 517) | pub unsafe trait VariableLen<'input>: Sized {
    method try_ref (line 519) | unsafe fn try_ref(input: &'input [u8], count: usize) -> Result<(Self, ...
    method fill_slice (line 522) | unsafe fn fill_slice<'out>(
    method num_bytes (line 527) | fn num_bytes(&self, count: usize) -> usize;
  function try_ref (line 535) | unsafe fn try_ref(input: &'i [u8], count: usize) -> Result<(Self, &'i [u...
  function fill_slice (line 552) | unsafe fn fill_slice<'out>(
  function num_bytes (line 571) | fn num_bytes(&self, count: usize) -> usize {
  function try_ref (line 585) | unsafe fn try_ref(input: &'i [u8], count: usize) -> Result<(Self, &'i [u...
  function fill_slice (line 618) | unsafe fn fill_slice<'out>(
  function num_bytes (line 630) | fn num_bytes(&self, count: usize) -> usize {
  function fill_slice_from_iter (line 639) | unsafe fn fill_slice_from_iter<
  function len_of_iterable (line 667) | fn len_of_iterable<'i, T: FlatSerializable<'i>, V: ValOrRef<T>, I: Itera...
  function aligning_len (line 687) | fn aligning_len(ptr: *const MaybeUninit<u8>, align: usize) -> usize {
  type ValOrRef (line 695) | trait ValOrRef<T: ?Sized> {
    method to_ref (line 696) | fn to_ref(&self) -> &T;
  method to_ref (line 700) | fn to_ref(&self) -> &T {
  function to_ref (line 706) | fn to_ref(&self) -> &T {
  function basic (line 729) | fn basic() {
  function bad_len1 (line 797) | fn bad_len1() {
  function bad_len2 (line 812) | fn bad_len2() {
  constant _TEST_NO_VARIABLE_LEN_NO_LIFETIME (line 834) | const _TEST_NO_VARIABLE_LEN_NO_LIFETIME: Optional = Optional {
  function optional_present (line 841) | fn optional_present() {
  function optional_absent (line 881) | fn optional_absent() {
  function nested (line 923) | fn nested() {
  function nested_optional (line 990) | fn nested_optional() {
  function nested_slice (line 1042) | fn nested_slice() {
  function basic_enum1 (line 1122) | fn basic_enum1() {
  function basic_enum2 (line 1148) | fn basic_enum2() {
  function padded_enum1 (line 1195) | fn padded_enum1() {
  function padded_enum2 (line 1239) | fn padded_enum2() {
  function many_enum (line 1279) | fn many_enum() {
  function test_no_refrence (line 1377) | fn test_no_refrence() {
  function test_size_align_struct (line 1445) | fn test_size_align_struct() {
  function test_size_align_enum (line 1647) | fn test_size_align_enum() {
  type Foo (line 1809) | struct Foo {
  constant _ (line 1814) | const _: () = {
  function foo (line 1821) | fn foo() {
  type Bar (line 1849) | enum Bar {
  constant _ (line 1854) | const _: () = {
  function fs_enum_a (line 1861) | fn fs_enum_a() {
  function fs_enum_b (line 1885) | fn fs_enum_b() {
  function fs_enum_non (line 1904) | fn fs_enum_non() {

FILE: crates/flat_serialize/flat_serialize_macro/src/lib.rs
  function flat_serialize (line 15) | pub fn flat_serialize(input: TokenStream) -> TokenStream {
  type FlatSerialize (line 29) | enum FlatSerialize {
  type FlatSerializeEnum (line 50) | struct FlatSerializeEnum {
    method variants (line 392) | fn variants(&self, lifetime: Option<&TokenStream2>) -> TokenStream2 {
    method uniqueness_check (line 415) | fn uniqueness_check(&self) -> TokenStream2 {
    method alignment_check (line 434) | fn alignment_check(&self) -> TokenStream2 {
    method fn_trait_check (line 453) | fn fn_trait_check(&self) -> TokenStream2 {
    method fn_required_alignment (line 468) | fn fn_required_alignment(&self) -> TokenStream2 {
    method fn_max_provided_alignment (line 501) | fn fn_max_provided_alignment(&self) -> TokenStream2 {
    method fn_min_len (line 573) | fn fn_min_len(&self) -> TokenStream2 {
    method fn_try_ref (line 605) | fn fn_try_ref(&self, lifetime: Option<&TokenStream2>) -> TokenStream2 {
    method fn_fill_slice (line 662) | fn fn_fill_slice(&self) -> TokenStream2 {
    method fn_len (line 694) | fn fn_len(&self) -> TokenStream2 {
  type FlatSerializeVariant (line 59) | struct FlatSerializeVariant {
  type FlatSerializeStruct (line 108) | struct FlatSerializeStruct {
    method alignment_check (line 721) | fn alignment_check(&self, start: TokenStream2, min_align: TokenStream2...
    method fn_trait_check (line 734) | fn fn_trait_check(&self) -> TokenStream2 {
    method fn_required_alignment (line 743) | fn fn_required_alignment(&self) -> TokenStream2 {
    method fn_max_provided_alignment (line 760) | fn fn_max_provided_alignment(&self) -> TokenStream2 {
    method fn_min_len (line 795) | fn fn_min_len(&self) -> TokenStream2 {
    method fn_try_ref (line 807) | fn fn_try_ref(&self, lifetime: Option<&TokenStream2>) -> TokenStream2 {
    method fn_try_ref_body (line 836) | fn fn_try_ref_body(&self, break_label: &syn::Lifetime) -> TryRefBody {
    method fn_fill_slice (line 866) | fn fn_fill_slice(&self) -> TokenStream2 {
    method fill_slice_body (line 882) | fn fill_slice_body(&self) -> (TokenStream2, TokenStream2) {
    method fn_len (line 892) | fn fn_len(&self) -> TokenStream2 {
  type FlatSerializeField (line 115) | struct FlatSerializeField {
    method alignment_check (line 909) | fn alignment_check(&self) -> TokenStream2 {
    method trait_check (line 950) | fn trait_check(&self) -> TokenStream2 {
    method required_alignment (line 976) | fn required_alignment(&self) -> TokenStream2 {
    method max_provided_alignment (line 986) | fn max_provided_alignment(&self) -> TokenStream2 {
    method min_len (line 1033) | fn min_len(&self) -> TokenStream2 {
    method try_wrap (line 1047) | fn try_wrap(&self, break_label: &syn::Lifetime) -> TokenStream2 {
    method fill_slice (line 1111) | fn fill_slice(&self) -> TokenStream2 {
    method err_size (line 1154) | fn err_size(&self) -> TokenStream2 {
    method exposed_ty (line 1185) | fn exposed_ty(&self, lifetime: Option<&TokenStream2>) -> TokenStream2 {
    method local_ty (line 1212) | fn local_ty(&self) -> TokenStream2 {
    method size_fn (line 1235) | fn size_fn(&self) -> TokenStream2 {
    method make_owned (line 1270) | fn make_owned(&self) -> TokenStream2 {
    method into_owned (line 1294) | fn into_owned(&self) -> TokenStream2 {
    method declaration (line 1317) | fn declaration<'a, 'b: 'a>(
    method per_field_attrs (line 1331) | fn per_field_attrs<'a, 'b: 'a>(
    method ty_without_lifetime (line 1347) | fn ty_without_lifetime(&self) -> TokenStream2 {
    method is_optional (line 1357) | fn is_optional(&self) -> bool {
  type PerFieldsAttr (line 125) | struct PerFieldsAttr {
  type VariableLenFieldInfo (line 131) | struct VariableLenFieldInfo {
    method len_from_bytes (line 338) | fn len_from_bytes(&self) -> TokenStream2 {
    method counter_expr (line 345) | fn counter_expr(&self) -> TokenStream2 {
    method err_size_expr (line 352) | fn err_size_expr(&self) -> TokenStream2 {
    method ty_without_lifetime (line 1369) | fn ty_without_lifetime(&self) -> TokenStream2 {
  function flat_serialize_struct (line 141) | fn flat_serialize_struct(input: FlatSerializeStruct) -> TokenStream2 {
  function flat_serialize_enum (line 243) | fn flat_serialize_enum(input: FlatSerializeEnum) -> TokenStream2 {
  type SelfReplacer (line 364) | struct SelfReplacer<F: FnMut(&Ident) -> syn::Expr>(F);
  method visit_expr_mut (line 367) | fn visit_expr_mut(&mut self, expr: &mut syn::Expr) {
  type TryRefBody (line 384) | struct TryRefBody {
  function flat_serializable_derive (line 1381) | pub fn flat_serializable_derive(input: TokenStream) -> TokenStream {

FILE: crates/flat_serialize/flat_serialize_macro/src/parser.rs
  constant LIBRARY_MARKER (line 21) | const LIBRARY_MARKER: &str = "flat_serialize";
  function flat_serialize_attr_path (line 23) | fn flat_serialize_attr_path(att_name: &str) -> syn::Path {
  method parse (line 30) | fn parse(input: ParseStream) -> Result<Self> {
  method parse (line 62) | fn parse(input: ParseStream) -> Result<Self> {
  method parse (line 86) | fn parse(input: ParseStream) -> Result<Self> {
  method parse (line 117) | fn parse(input: ParseStream) -> Result<Self> {
  method parse (line 139) | fn parse(input: ParseStream) -> Result<Self> {
  type Target (line 194) | type Target = Field;
  method deref (line 196) | fn deref(&self) -> &Self::Target {
  method parse (line 202) | fn parse(input: ParseStream) -> Result<Self> {
  function has_self_field (line 275) | fn has_self_field(expr: &Expr) -> bool {
  type FindSelf (line 281) | struct FindSelf(bool);
    method visit_path_segment (line 284) | fn visit_path_segment(&mut self, i: &'ast syn::PathSegment) {
  function validate_self_fields (line 305) | fn validate_self_fields<'a>(fields: impl Iterator<Item = &'a mut FlatSer...
  function validate_self_field (line 318) | fn validate_self_field(
  type ValidateLenFields (line 330) | struct ValidateLenFields<'a, 'b>(Option<TokenStream2>, &'b HashSet<&'a I...
  function visit_expr (line 333) | fn visit_expr(&mut self, expr: &'ast syn::Expr) {
  function as_turbofish (line 358) | pub fn as_turbofish(ty: &Type) -> TokenStream2 {
  function has_lifetime (line 411) | pub fn has_lifetime(ty: &Type) -> bool {

FILE: crates/hyperloglogplusplus/src/dense.rs
  type Storage (line 9) | pub struct Storage<'s> {
  function new (line 18) | pub fn new(precision: u8) -> Self {
  function from_parts (line 33) | pub fn from_parts(registers: &'s [u8], precision: u8) -> Self {
  function into_owned (line 43) | pub fn into_owned(&self) -> Storage<'static> {
  function add_hash (line 52) | pub fn add_hash(&mut self, hash: u64) {
  function add_encoded (line 57) | pub fn add_encoded(&mut self, encoded: crate::sparse::Encoded) {
  function idx_count_from_hash (line 62) | fn idx_count_from_hash(&self, hash: u64) -> (usize, u8) {
  function idx_count_from_encoded (line 70) | fn idx_count_from_encoded(&self, encoded: crate::sparse::Encoded) -> (us...
  function estimate_count (line 77) | pub fn estimate_count(&self) -> u64 {
  function linear_counting (line 106) | fn linear_counting(&self, v: f64) -> f64 {
  function threshold (line 111) | fn threshold(&self) -> f64 {
  function a_m (line 115) | fn a_m(&self) -> f64 {
  function estimate_bias (line 126) | fn estimate_bias(&self, estimate: f64) -> f64 {
  function merge_in (line 208) | pub fn merge_in(&mut self, other: &Storage<'_>) {
  function num_bytes (line 229) | pub fn num_bytes(&self) -> usize {
  function hash (line 247) | pub fn hash<V: Hash>(val: V) -> u64 {
  function new_panics_b3 (line 255) | fn new_panics_b3() {
  function new_works_b4 (line 260) | fn new_works_b4() {
  function new_works_b18 (line 265) | fn new_works_b18() {
  function new_panics_b19 (line 271) | fn new_panics_b19() {
  function empty (line 276) | fn empty() {
  function add_b4_n1k (line 281) | fn add_b4_n1k() {
  function add_b8_n1k (line 291) | fn add_b8_n1k() {
  function add_b12_n1k (line 301) | fn add_b12_n1k() {
  function add_b16_n1k (line 310) | fn add_b16_n1k() {
  function add_b8_n10k (line 319) | fn add_b8_n10k() {
  function add_b12_n10k (line 328) | fn add_b12_n10k() {
  function add_b16_n10k (line 337) | fn add_b16_n10k() {
  function add_b16_n100k (line 346) | fn add_b16_n100k() {
  function add_b16_n1m (line 355) | fn add_b16_n1m() {
  function clone (line 364) | fn clone() {
  function merge (line 383) | fn merge() {
  function merge_panics_p (line 404) | fn merge_panics_p() {
  function issue_74 (line 411) | fn issue_74() {
  function quick_16 (line 480) | fn quick_16(values: HashSet<u64>) -> quickcheck::TestResult {
  function quick_8 (line 499) | fn quick_8(values: Vec<u64>) -> quickcheck::TestResult {
  function quick_decode_16 (line 517) | fn quick_decode_16(value: u64) -> bool {

FILE: crates/hyperloglogplusplus/src/hyperloglog_data.rs
  constant THRESHOLD_DATA_OFFSET (line 6) | pub(crate) const THRESHOLD_DATA_OFFSET: usize = 4;
  constant THRESHOLD_DATA_VEC (line 7) | pub(crate) const THRESHOLD_DATA_VEC: &[usize] = &[
  constant RAW_ESTIMATE_DATA_OFFSET (line 25) | pub(crate) const RAW_ESTIMATE_DATA_OFFSET: usize = 4;
  constant RAW_ESTIMATE_DATA_VEC (line 26) | pub(crate) const RAW_ESTIMATE_DATA_VEC: &[&[f64]] = &[
  constant BIAS_DATA_OFFSET (line 1129) | pub(crate) const BIAS_DATA_OFFSET: usize = 4;
  constant BIAS_DATA_VEC (line 1130) | pub(crate) const BIAS_DATA_VEC: &[&[f64]] = &[

FILE: crates/hyperloglogplusplus/src/lib.rs
  type HyperLogLog (line 18) | pub struct HyperLogLog<'s, T: ?Sized, B> {
  type HyperLogLogStorage (line 25) | pub enum HyperLogLogStorage<'s> {
  function new (line 31) | pub fn new(precision: u8, buildhasher: B) -> Self {
  function from_sparse_parts (line 39) | pub fn from_sparse_parts(
  function from_dense_parts (line 56) | pub fn from_dense_parts(bytes: &'s [u8], precision: u8, buildhasher: B) ...
  function estimate_count (line 64) | pub fn estimate_count(&mut self) -> u64 {
  function immutable_estimate_count (line 73) | pub fn immutable_estimate_count(&self) -> u64 {
  function is_sparse (line 82) | pub fn is_sparse(&self) -> bool {
  function num_bytes (line 88) | pub fn num_bytes(&self) -> usize {
  function to_parts (line 97) | pub fn to_parts(&mut self) -> &HyperLogLogStorage<'s> {
  function merge_all (line 102) | pub fn merge_all(&mut self) {
  function into_owned (line 109) | pub fn into_owned(&self) -> HyperLogLog<'static, T, B>
  function add (line 131) | pub fn add(&mut self, value: &T) {
  function merge_in (line 147) | pub fn merge_in(&mut self, other: &HyperLogLog<'_, T, B>) {
  type Extractable (line 168) | pub(crate) trait Extractable:
    constant NUM_BITS (line 171) | const NUM_BITS: u8;
    method extract_bits (line 172) | fn extract_bits(&self, high: u8, low: u8) -> Self {
    method extract (line 175) | fn extract(&self, high: u8, len: u8) -> Self {
    method q (line 178) | fn q(&self) -> u8;
    constant NUM_BITS (line 182) | const NUM_BITS: u8 = 64;
    method q (line 183) | fn q(&self) -> u8 {
    constant NUM_BITS (line 189) | const NUM_BITS: u8 = 32;
    method q (line 190) | fn q(&self) -> u8 {
  function error_for_precision (line 195) | pub fn error_for_precision(precision: u8) -> f64 {
  function precision_for_error (line 199) | pub fn precision_for_error(max_error: f64) -> u8 {
  function test_asc_4_10k (line 222) | fn test_asc_4_10k() {
  function test_asc_4_100k (line 234) | fn test_asc_4_100k() {
  function test_asc_4_500k (line 246) | fn test_asc_4_500k() {
  function test_asc_8_10k (line 259) | fn test_asc_8_10k() {
  function test_asc_8_100k (line 271) | fn test_asc_8_100k() {
  function test_asc_8_500k (line 283) | fn test_asc_8_500k() {
  function test_asc_16_10k (line 296) | fn test_asc_16_10k() {
  function test_asc_16_100k (line 308) | fn test_asc_16_100k() {
  function test_asc_16_500k (line 320) | fn test_asc_16_500k() {
  function quick_hll_16 (line 333) | fn quick_hll_16(values: HashSet<u64>) -> TestResult {
  function quick_merge_hll_16 (line 352) | fn quick_merge_hll_16(values_a: Vec<u64>, values_b: Vec<u64>) {
  function quick_merge_hll_8 (line 374) | fn quick_merge_hll_8(values_a: Vec<u64>, values_b: Vec<u64>) {
  function quick_merge_hll_4 (line 402) | fn quick_merge_hll_4(values_a: Vec<u64>, values_b: Vec<u64>) {
  function precision_for_error (line 422) | fn precision_for_error() {

FILE: crates/hyperloglogplusplus/src/registers.rs
  type Registers (line 22) | pub struct Registers<'s>(Cow<'s, [u8]>);
  function new (line 26) | pub fn new(exponent: u8) -> Self {
  function from_raw (line 44) | pub fn from_raw(bytes: &'s [u8]) -> Self {
  function at (line 49) | pub fn at(&self, idx: usize) -> u8 {
  function set_max (line 59) | pub fn set_max(&mut self, idx: usize, value: u8) {
  function bytes (line 93) | pub fn bytes(&self) -> &[u8] {
  function count_zeroed_registers (line 97) | pub fn count_zeroed_registers(&self) -> u64 {
  function iter (line 101) | pub fn iter(&self) -> impl Iterator<Item = u8> + '_ {
  function byte_len (line 128) | pub fn byte_len(&self) -> usize {
  function merge (line 132) | pub fn merge(a: &Registers<'_>, b: &Registers<'_>) -> Self {
  function into_owned (line 150) | pub fn into_owned(&self) -> Registers<'static> {
  function test_last_index_not_clobbered (line 160) | fn test_last_index_not_clobbered() {
  function test_last_index_not_clobbers (line 186) | fn test_last_index_not_clobbers() {
  function test_count_empty (line 215) | fn test_count_empty() {
  function test_count_4 (line 220) | fn test_count_4() {
  function test_count_5 (line 226) | fn test_count_5() {
  function test_count_6 (line 232) | fn test_count_6() {
  function test_count_7 (line 238) | fn test_count_7() {
  function test_iter_4_0_1 (line 244) | fn test_iter_4_0_1() {
  function quick_test (line 254) | fn quick_test(exp: u8, ops: Vec<(usize, u8)>) -> quickcheck::TestResult {
  function quick_merge (line 309) | fn quick_merge(

FILE: crates/hyperloglogplusplus/src/sparse.rs
  type Storage (line 16) | pub struct Storage<'s> {
  type Encoded (line 25) | pub struct Encoded(u32);
    method from_hash (line 193) | pub(crate) fn from_hash(hash: u64, precision: u8) -> Self {
    method idx (line 211) | pub fn idx(&self) -> u32 {
    method count (line 219) | pub fn count(&self, p: u8) -> u8 {
    method stores_count (line 231) | fn stores_count(&self) -> bool {
    method extract_count (line 236) | fn extract_count(&self) -> u8 {
  constant NUM_HIGH_BITS (line 27) | const NUM_HIGH_BITS: u8 = 25;
  type Overflowing (line 29) | pub type Overflowing = bool;
  function new (line 32) | pub fn new(precision: u8) -> Self {
  function from_parts (line 46) | pub fn from_parts(bytes: &'s [u8], num_compressed: u64, precision: u8) -...
  function into_owned (line 60) | pub fn into_owned(&self) -> Storage<'static> {
  function add_hash (line 69) | pub fn add_hash(&mut self, hash: u64) -> Overflowing {
  function add_encoded (line 74) | fn add_encoded(&mut self, encoded: Encoded) -> Overflowing {
  function estimate_count (line 85) | pub fn estimate_count(&mut self) -> u64 {
  function immutable_estimate_count (line 90) | pub fn immutable_estimate_count(&self) -> u64 {
  function merge_buffers (line 100) | pub fn merge_buffers(&mut self) {
  function iter (line 149) | fn iter(&self) -> impl Iterator<Item = Encoded> + '_ {
  function to_dense (line 153) | pub fn to_dense(&mut self) -> dense::Storage<'static> {
  function immutable_to_dense (line 159) | pub fn immutable_to_dense(&self) -> dense::Storage<'static> {
  function num_bytes (line 170) | pub fn num_bytes(&self) -> usize {
  function merge_in (line 174) | pub fn merge_in(&mut self, other: &Storage<'_>) -> Overflowing {
  method partial_cmp (line 242) | fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
  method cmp (line 250) | fn cmp(&self, other: &Self) -> std::cmp::Ordering {
  constant NUM_HASH_BITS (line 273) | const NUM_HASH_BITS: u8 = 64 - NUM_HIGH_BITS;
  function hash (line 275) | pub fn hash(val: i32) -> u64 {
  function test_asc_10k (line 282) | fn test_asc_10k() {
  function test_asc_100k (line 291) | fn test_asc_100k() {
  function test_asc_500k (line 301) | fn test_asc_500k() {
  function quick_sparse (line 312) | fn quick_sparse(values: Vec<u64>) -> TestResult {
  function quick_sparse_as_set (line 334) | fn quick_sparse_as_set(values: Vec<u64>) -> TestResult {
  function quick_sparse_merge_invariant (line 371) | fn quick_sparse_merge_invariant(values: Vec<u64>) -> TestResult {
  function sparse_merge_01 (line 408) | fn sparse_merge_01() {

FILE: crates/hyperloglogplusplus/src/sparse/varint.rs
  function decompression_iter (line 7) | pub fn decompression_iter<'a>(
  type Compressed (line 16) | pub struct Compressed<'c>(Cow<'c, [u8]>);
  function from_raw (line 19) | pub fn from_raw(bytes: &'c [u8]) -> Self {
  function bytes (line 23) | pub fn bytes(&self) -> &[u8] {
  function num_bytes (line 27) | pub fn num_bytes(&self) -> usize {
  function cap (line 32) | pub fn cap(&self) -> usize {
  function make_owned (line 36) | pub fn make_owned(&self) -> Compressed<'static> {
  type Compressor (line 41) | pub struct Compressor<F: FnMut(u64) -> u64> {
  function compressor (line 48) | pub fn compressor() -> Compressor<impl FnMut(u64) -> u64> {
  function is_empty (line 57) | pub fn is_empty(&self) -> bool {
  function last_mut (line 61) | pub fn last_mut(&mut self) -> Option<&mut Encoded> {
  function push (line 65) | pub fn push(&mut self, value: Encoded) {
  function into_compressed (line 73) | pub fn into_compressed(mut self) -> (Compressed<'static>, u64) {
  function compress_value (line 84) | fn compress_value(&mut self, Encoded(value): Encoded) {
  function extend (line 91) | fn extend<T: IntoIterator<Item = Encoded>>(&mut self, iter: T) {
  function quick_test_roundtrip (line 103) | fn quick_test_roundtrip(values: Vec<u32>) -> bool {

FILE: crates/scripting-utilities/control_file_reader/src/lib.rs
  type Result (line 5) | pub type Result<T, E = Error> = std::result::Result<T, E>;
  function get_current_version (line 8) | pub fn get_current_version(control_file: &str) -> Result<String> {
  function get_upgradeable_from (line 13) | pub fn get_upgradeable_from(control_file: &str) -> Result<Vec<String>> {
  function get_field_val (line 24) | pub fn get_field_val<'a>(file: &'a str, field_name: &str) -> Result<&'a ...
  function get_quoted_field (line 34) | pub fn get_quoted_field(line: &str) -> Result<&str> {
  type Error (line 44) | pub enum Error {
    method fmt (line 51) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    method fmt (line 61) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {

FILE: crates/scripting-utilities/postgres_connection_configuration/src/lib.rs
  type ConnectionConfig (line 6) | pub struct ConnectionConfig<'s> {
  function with_db (line 15) | pub fn with_db<'d>(&self, database: &'d str) -> ConnectionConfig<'d>
  function config_string (line 26) | pub fn config_string(&self) -> String {

FILE: crates/stats-agg/src/lib.rs
  type FloatLike (line 10) | pub trait FloatLike:
    method lit (line 14) | fn lit(val: f64) -> Self {
    method from_u64 (line 17) | fn from_u64(n: u64) -> Self;
    method from_u64 (line 20) | fn from_u64(n: u64) -> Self {
    method from_u64 (line 25) | fn from_u64(n: u64) -> Self {
  type StatsError (line 31) | pub enum StatsError {
  type XYPair (line 36) | pub struct XYPair<T: FloatLike> {
  constant INV_FLOATING_ERROR_THRESHOLD (line 55) | const INV_FLOATING_ERROR_THRESHOLD: f64 = 0.99;
  constant INV_FLOATING_ERROR_THRESHOLD (line 57) | const INV_FLOATING_ERROR_THRESHOLD: f64 = f64::INFINITY;
  function accum (line 68) | pub(crate) fn accum<T: FloatLike>(n: T, sx: T, sxx: T, sx3: T, x: T) -> T {
  function remove (line 76) | pub(crate) fn remove<T: FloatLike>(new_n: T, new_sx: T, new_sxx: T, old_...
  function combine (line 85) | pub(crate) fn combine<T: FloatLike>(
  function remove_combined (line 104) | pub(crate) fn remove_combined<T: FloatLike>(
  function accum (line 129) | pub(crate) fn accum<T: FloatLike>(n: T, sx: T, sxx: T, sx3: T, sx4: T, x...
  function remove (line 138) | pub(crate) fn remove<T: FloatLike>(
  function combine (line 156) | pub(crate) fn combine<T: FloatLike>(
  function remove_combined (line 178) | pub(crate) fn remove_combined<T: FloatLike>(
  function floatlike_lit (line 208) | fn floatlike_lit() {

FILE: crates/stats-agg/src/stats1d.rs
  type StatsSummary1D (line 6) | pub struct StatsSummary1D<T: FloatLike> {
  method default (line 17) | fn default() -> Self {
  function from (line 24) | fn from(input_summary: StatsSummary1D<f64>) -> Self {
  function convert_tf_to_f64 (line 34) | pub fn convert_tf_to_f64(tf: TwoFloat) -> f64 {
  function from (line 38) | fn from(input_summary: StatsSummary1D<TwoFloat>) -> Self {
  function n64 (line 53) | fn n64(&self) -> T {
  function new (line 57) | pub fn new() -> Self {
  function accum (line 71) | pub fn accum(&mut self, p: T) -> Result<(), StatsError> {
  function has_infinite (line 112) | fn has_infinite(&self) -> bool {
  function check_overflow (line 119) | fn check_overflow(&self, old: &Self, p: T) -> bool {
  function remove (line 140) | pub fn remove(&self, p: T) -> Option<Self> {
  function new_from_vec (line 179) | pub fn new_from_vec(v: Vec<T>) -> Result<Self, StatsError> {
  function combine (line 187) | pub fn combine(&self, other: Self) -> Result<Self, StatsError> {
  function remove_combined (line 242) | pub fn remove_combined(&self, remove: Self) -> Option<Self> {
  function avg (line 292) | pub fn avg(&self) -> Option<T> {
  function count (line 299) | pub fn count(&self) -> i64 {
  function sum (line 303) | pub fn sum(&self) -> Option<T> {
  function var_pop (line 310) | pub fn var_pop(&self) -> Option<T> {
  function var_samp (line 317) | pub fn var_samp(&self) -> Option<T> {
  function stddev_pop (line 324) | pub fn stddev_pop(&self) -> Option<T> {
  function stddev_samp (line 328) | pub fn stddev_samp(&self) -> Option<T> {
  function skewness_pop (line 332) | pub fn skewness_pop(&self) -> Option<T> {
  function skewness_samp (line 336) | pub fn skewness_samp(&self) -> Option<T> {
  function kurtosis_pop (line 340) | pub fn kurtosis_pop(&self) -> Option<T> {
  function kurtosis_samp (line 344) | pub fn kurtosis_samp(&self) -> Option<T> {
  function tf (line 354) | fn tf(f: f64) -> TwoFloat {
  function assert_close_enough (line 359) | fn assert_close_enough(s1: &StatsSummary1D<f64>, s2: &StatsSummary1D<f64...
  function assert_close_enough_tf (line 368) | fn assert_close_enough_tf(s1: &StatsSummary1D<TwoFloat>, s2: &StatsSumma...
  function test_against_known_vals (line 377) | fn test_against_known_vals() {
  function test_against_known_vals_tf (line 416) | fn test_against_known_vals_tf() {
  function test_combine (line 486) | fn test_combine() {

FILE: crates/stats-agg/src/stats2d.rs
  type StatsSummary2D (line 11) | pub struct StatsSummary2D<T: FloatLike> {
  function from (line 25) | fn from(input_summary: StatsSummary2D<TwoFloat>) -> Self {
  method default (line 42) | fn default() -> Self {
  function new (line 48) | pub fn new() -> Self {
  function n64 (line 63) | fn n64(&self) -> T {
  function accum (line 81) | pub fn accum(&mut self, p: XYPair<T>) -> Result<(), StatsError> {
  function has_infinite (line 145) | fn has_infinite(&self) -> bool {
  function check_overflow (line 156) | fn check_overflow(&self, old: &StatsSummary2D<T>, p: XYPair<T>) -> bool {
  function remove (line 194) | pub fn remove(&self, p: XYPair<T>) -> Option<Self> {
  function new_from_vec (line 253) | pub fn new_from_vec(v: Vec<XYPair<T>>) -> Result<Self, StatsError> {
  function combine (line 276) | pub fn combine(&self, other: StatsSummary2D<T>) -> Result<Self, StatsErr...
  function remove_combined (line 354) | pub fn remove_combined(&self, remove: StatsSummary2D<T>) -> Option<Self> {
  function offset (line 450) | pub fn offset(&mut self, offset: XYPair<T>) -> Result<(), StatsError> {
  function sum_squares (line 475) | pub fn sum_squares(&self) -> Option<XYPair<T>> {
  function sumxy (line 494) | pub fn sumxy(&self) -> Option<T> {
  function avg (line 513) | pub fn avg(&self) -> Option<XYPair<T>> {
  function count (line 534) | pub fn count(&self) -> i64 {
  function sum (line 550) | pub fn sum(&self) -> Option<XYPair<T>> {
  function var_pop (line 560) | pub fn var_pop(&self) -> Option<XYPair<T>> {
  function var_samp (line 570) | pub fn var_samp(&self) -> Option<XYPair<T>> {
  function stddev_pop (line 581) | pub fn stddev_pop(&self) -> Option<XYPair<T>> {
  function stddev_samp (line 590) | pub fn stddev_samp(&self) -> Option<XYPair<T>> {
  function skewness_pop (line 598) | pub fn skewness_pop(&self) -> Option<XYPair<T>> {
  function skewness_samp (line 606) | pub fn skewness_samp(&self) -> Option<XYPair<T>> {
  function kurtosis_pop (line 614) | pub fn kurtosis_pop(&self) -> Option<XYPair<T>> {
  function kurtosis_samp (line 622) | pub fn kurtosis_samp(&self) -> Option<XYPair<T>> {
  function corr (line 634) | pub fn corr(&self) -> Option<T> {
  function slope (line 643) | pub fn slope(&self) -> Option<T> {
  function intercept (line 653) | pub fn intercept(&self) -> Option<T> {
  function x_intercept (line 664) | pub fn x_intercept(&self) -> Option<T> {
  function determination_coeff (line 677) | pub fn determination_coeff(&self) -> Option<T> {
  function covar_samp (line 699) | pub fn covar_samp(&self) -> Option<T> {
  function covar_pop (line 717) | pub fn covar_pop(&self) -> Option<T> {
  function tf (line 728) | fn tf(f: f64) -> TwoFloat {
  function test_linear (line 733) | fn test_linear() {
  function test_linear_tf (line 785) | fn test_linear_tf() {

FILE: crates/stats-agg/src/stats2d/stats2d_flat_serialize.rs
  constant REQUIRED_ALIGNMENT (line 6) | const REQUIRED_ALIGNMENT: usize = {
  constant MAX_PROVIDED_ALIGNMENT (line 51) | const MAX_PROVIDED_ALIGNMENT: Option<usize> = {
  constant MIN_LEN (line 140) | const MIN_LEN: usize = {
  constant TRIVIAL_COPY (line 155) | const TRIVIAL_COPY: bool = true;
  type SLICE (line 156) | type SLICE = flat_serialize::Slice<'a, StatsSummary2D<f64>>;
  type OWNED (line 157) | type OWNED = Self;
  function try_ref (line 160) | unsafe fn try_ref(mut input: &[u8]) -> Result<(Self, &[u8]), flat_serial...
  function fill_slice (line 335) | unsafe fn fill_slice<'out>(
  function num_bytes (line 394) | fn num_bytes(&self) -> usize {
  function make_owned (line 420) | fn make_owned(&mut self) {}
  function into_owned (line 422) | fn into_owned(self) -> Self::OWNED {

FILE: crates/t-digest-lib/src/lib.rs
  function timescaledb_toolkit_tdigest_builder_with_size (line 5) | pub extern "C" fn timescaledb_toolkit_tdigest_builder_with_size(
  function timescaledb_toolkit_tdigest_push (line 12) | pub unsafe extern "C" fn timescaledb_toolkit_tdigest_push(
  function timescaledb_toolkit_tdigest_merge (line 21) | pub unsafe extern "C" fn timescaledb_toolkit_tdigest_merge(
  function timescaledb_toolkit_tdigest_builder_free (line 30) | pub extern "C" fn timescaledb_toolkit_tdigest_builder_free(_: Box<tdiges...
  function timescaledb_toolkit_tdigest_build (line 33) | pub extern "C" fn timescaledb_toolkit_tdigest_build(
  function timescaledb_toolkit_tdigest_free (line 40) | pub extern "C" fn timescaledb_toolkit_tdigest_free(_: Box<tdigest::TDige...
  function timescaledb_toolkit_tdigest_format_for_postgres (line 47) | pub unsafe extern "C" fn timescaledb_toolkit_tdigest_format_for_postgres(

FILE: crates/t-digest/src/lib.rs
  type Centroid (line 52) | pub struct Centroid {
    method new (line 70) | pub fn new(mean: f64, weight: u64) -> Self {
    method mean (line 78) | pub fn mean(&self) -> f64 {
    method weight (line 83) | pub fn weight(&self) -> u64 {
    method add (line 87) | pub fn add(&mut self, sum: f64, weight: u64) -> f64 {
  method partial_cmp (line 58) | fn partial_cmp(&self, other: &Centroid) -> Option<Ordering> {
  method cmp (line 64) | fn cmp(&self, other: &Centroid) -> Ordering {
  method default (line 100) | fn default() -> Self {
  type TDigest (line 110) | pub struct TDigest {
    method new_with_size (line 120) | pub fn new_with_size(max_size: usize) -> Self {
    method new (line 131) | pub fn new(
    method raw_centroids (line 159) | pub fn raw_centroids(&self) -> &[Centroid] {
    method mean (line 164) | pub fn mean(&self) -> f64 {
    method sum (line 175) | pub fn sum(&self) -> f64 {
    method count (line 180) | pub fn count(&self) -> u64 {
    method max (line 185) | pub fn max(&self) -> f64 {
    method min (line 190) | pub fn min(&self) -> f64 {
    method is_empty (line 195) | pub fn is_empty(&self) -> bool {
    method max_size (line 200) | pub fn max_size(&self) -> usize {
    method num_buckets (line 205) | pub fn num_buckets(&self) -> usize {
    method format_for_postgres (line 209) | pub fn format_for_postgres(&self) -> String {
    method k_to_q (line 253) | fn k_to_q(k: f64, d: f64) -> f64 {
    method merge_unsorted (line 263) | pub fn merge_unsorted(&self, unsorted_values: Vec<f64>) -> TDigest {
    method update_bounds_on_overflow (line 275) | fn update_bounds_on_overflow(
    method merge_sorted (line 288) | pub fn merge_sorted(&self, sorted_values: Vec<f64>) -> TDigest {
    method external_merge (line 379) | fn external_merge(centroids: &mut [Centroid], first: usize, middle: us...
    method merge_digests (line 420) | pub fn merge_digests(digests: Vec<TDigest>) -> TDigest {
    method estimate_quantile_at_value (line 518) | pub fn estimate_quantile_at_value(&self, v: f64) -> f64 {
    method estimate_quantile (line 563) | pub fn estimate_quantile(&self, q: f64) -> f64 {
  method default (line 240) | fn default() -> Self {
  type Builder (line 684) | pub struct Builder {
    method from (line 691) | fn from(digested: TDigest) -> Self {
    method with_size (line 700) | pub fn with_size(size: usize) -> Self {
    method push (line 706) | pub fn push(&mut self, value: f64) {
    method digest (line 714) | fn digest(&mut self) {
    method build (line 722) | pub fn build(&mut self) -> TDigest {
    method merge (line 727) | pub fn merge(&mut self, other: Self) {
  function test_centroid_addition_regression (line 745) | fn test_centroid_addition_regression() {
  function test_merge_sorted_against_uniform_distro (line 767) | fn test_merge_sorted_against_uniform_distro() {
  function test_merge_unsorted_against_uniform_distro (line 805) | fn test_merge_unsorted_against_uniform_distro() {
  function test_merge_sorted_against_skewed_distro (line 843) | fn test_merge_sorted_against_skewed_distro() {
  function test_merge_unsorted_against_skewed_distro (line 869) | fn test_merge_unsorted_against_skewed_distro() {
  function test_merge_digests (line 895) | fn test_merge_digests() {
  function test_quantile_and_value_estimates (line 939) | fn test_quantile_and_value_estimates() {
  function test_buffered_merge (line 978) | fn test_buffered_merge() {
  type OrderedF64 (line 998) | struct OrderedF64(OrderedFloat<f64>);
  method arbitrary (line 1001) | fn arbitrary(g: &mut Gen) -> Self {
  function fuzzing_test (line 1007) | fn fuzzing_test(

FILE: crates/time-weighted-average/src/lib.rs
  type TimeWeightMethod (line 8) | pub enum TimeWeightMethod {
    method interpolate (line 205) | pub fn interpolate(
    method weighted_sum (line 232) | pub fn weighted_sum(&self, first: TSPoint, second: TSPoint) -> f64 {
  type TimeWeightSummary (line 14) | pub struct TimeWeightSummary {
    method new (line 32) | pub fn new(pt: TSPoint, method: TimeWeightMethod) -> Self {
    method accum (line 41) | pub fn accum(&mut self, pt: TSPoint) -> Result<(), TimeWeightError> {
    method combine (line 60) | pub fn combine(&self, next: &TimeWeightSummary) -> Result<TimeWeightSu...
    method new_from_sorted_iter (line 79) | pub fn new_from_sorted_iter<'a>(
    method combine_sorted_iter (line 96) | pub fn combine_sorted_iter<'a>(
    method with_bounds (line 121) | pub fn with_bounds(
    method with_prev (line 137) | fn with_prev(&self, target_start: i64, prev: TSPoint) -> Result<Self, ...
    method with_next (line 158) | fn with_next(&self, target_end: i64, next: Option<TSPoint>) -> Result<...
    method time_weighted_average (line 185) | pub fn time_weighted_average(&self) -> Result<f64, TimeWeightError> {
    method time_weighted_integral (line 194) | pub fn time_weighted_integral(&self) -> f64 {
  type TimeWeightError (line 22) | pub enum TimeWeightError {
  function test_simple_accum_locf (line 263) | fn test_simple_accum_locf() {
  function test_simple_accum_linear (line 278) | fn test_simple_accum_linear() {
  function new_from_sorted_iter_test (line 293) | fn new_from_sorted_iter_test(t: TimeWeightMethod) {
  function test_new_from_sorted_iter (line 324) | fn test_new_from_sorted_iter() {
  function combine_test (line 329) | fn combine_test(t: TimeWeightMethod) {
  function test_combine (line 358) | fn test_combine() {
  function order_accum_test (line 363) | fn order_accum_test(t: TimeWeightMethod) {
  function test_order_accum (line 418) | fn test_order_accum() {
  function order_combine_test (line 423) | fn order_combine_test(t: TimeWeightMethod) {
  function test_order_combine (line 445) | fn test_order_combine() {
  function combine_sorted_iter_test (line 450) | fn combine_sorted_iter_test(t: TimeWeightMethod) {
  function test_combine_sorted_iter (line 505) | fn test_combine_sorted_iter() {
  function test_mismatch_combine (line 511) | fn test_mismatch_combine() {
  function test_weighted_sum (line 538) | fn test_weighted_sum() {
  function with_prev_common_test (line 557) | fn with_prev_common_test(t: TimeWeightMethod) {
  function test_with_prev (line 608) | fn test_with_prev() {
  function with_next_common_test (line 654) | fn with_next_common_test(t: TimeWeightMethod) {
  function test_with_next (line 708) | fn test_with_next() {
  function average_common_tests (line 762) | fn average_common_tests(t: TimeWeightMethod) {
  function test_average (line 770) | fn test_average() {

FILE: crates/tspoint/src/lib.rs
  type TSPoint (line 9) | pub struct TSPoint {
    method interpolate_linear (line 20) | pub fn interpolate_linear(&self, p2: &TSPoint, ts: i64) -> Result<f64,...
    method deserialize (line 62) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  type TSPointError (line 15) | pub enum TSPointError {
  method serialize (line 32) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function test_linear_interpolate (line 157) | fn test_linear_interpolate() {

FILE: crates/udd-sketch/src/lib.rs
  type SketchHashKey (line 25) | pub enum SketchHashKey {
    method cmp (line 34) | fn cmp(&self, other: &Self) -> std::cmp::Ordering {
    method partial_cmp (line 53) | fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
    method compact_key (line 71) | fn compact_key(&self) -> SketchHashKey {
  type UDDSketchMetadata (line 60) | pub struct UDDSketchMetadata {
  type SketchHashEntry (line 86) | struct SketchHashEntry {
  type SketchHashMap (line 93) | struct SketchHashMap {
    type Output (line 99) | type Output = u64;
    method index (line 101) | fn index(&self, id: SketchHashKey) -> &Self::Output {
    method new (line 128) | fn new() -> SketchHashMap {
    method with_capacity (line 135) | fn with_capacity(capacity: usize) -> SketchHashMap {
    method increment (line 143) | fn increment(&mut self, key: SketchHashKey) {
    method iter (line 147) | fn iter(&self) -> SketchHashIterator<'_> {
    method entry_split (line 158) | fn entry_split(&mut self, key: SketchHashKey) -> SketchHashKey {
    method entry_upsert (line 192) | fn entry_upsert(&mut self, key: SketchHashKey, count: u64) {
    method len (line 222) | fn len(&self) -> usize {
    method compact_using_stack (line 227) | fn compact_using_stack<const N: usize>(&mut self) {
    method populate_map_using_iter (line 249) | fn populate_map_using_iter(&mut self, entries: &mut [(SketchHashKey, u...
    method compact (line 305) | fn compact(&mut self) {
    method compact_using_heap (line 318) | fn compact_using_heap(&mut self) {
  type SketchHashIterator (line 108) | pub struct SketchHashIterator<'a> {
  type Item (line 114) | type Item = (SketchHashKey, u64);
  method next (line 116) | fn next(&mut self) -> Option<(SketchHashKey, u64)> {
  type UDDSketch (line 329) | pub struct UDDSketch {
    method new (line 340) | pub fn new(max_buckets: u32, initial_error: f64) -> Self {
    method new_from_data (line 355) | pub fn new_from_data(
    method key (line 393) | fn key(&self, value: f64) -> SketchHashKey {
    method compact_buckets (line 397) | pub fn compact_buckets(&mut self) {
    method bucket_iter (line 405) | pub fn bucket_iter(&self) -> SketchHashIterator<'_> {
    method add_value (line 411) | pub fn add_value(&mut self, value: f64) {
    method merge_items (line 425) | pub fn merge_items(
    method merge_sketch (line 468) | pub fn merge_sketch(&mut self, other: &UDDSketch) {
    method max_allowed_buckets (line 512) | pub fn max_allowed_buckets(&self) -> u32 {
    method times_compacted (line 516) | pub fn times_compacted(&self) -> u8 {
    method current_buckets_count (line 520) | pub fn current_buckets_count(&self) -> usize {
    method mean (line 527) | pub fn mean(&self) -> f64 {
    method sum (line 536) | pub fn sum(&self) -> f64 {
    method count (line 541) | pub fn count(&self) -> u64 {
    method max_error (line 546) | pub fn max_error(&self) -> f64 {
    method estimate_quantile (line 550) | pub fn estimate_quantile(&self, quantile: f64) -> f64 {
    method estimate_quantile_at_value (line 560) | pub fn estimate_quantile_at_value(&self, value: f64) -> f64 {
  function estimate_quantile (line 565) | pub fn estimate_quantile(
  function last_bucket_value (line 592) | fn last_bucket_value(
  function bucket_to_value (line 602) | fn bucket_to_value(alpha: f64, gamma: f64, bucket: SketchHashKey) -> f64 {
  function estimate_quantile_at_value (line 612) | pub fn estimate_quantile_at_value(
  function key (line 637) | fn key(value: f64, gamma: f64) -> SketchHashKey {
  function gamma (line 650) | pub fn gamma(alpha: f64) -> f64 {
  function build_and_add_values (line 661) | fn build_and_add_values() {
  function exceed_buckets (line 673) | fn exceed_buckets() {
  function merge_verifier (line 699) | fn merge_verifier(sketch: &mut UDDSketch, other: &UDDSketch) {
  function merge_sketches (line 727) | fn merge_sketches() {
  function test_quantile_and_value_estimates (line 802) | fn test_quantile_and_value_estimates() {
  function test_extreme_quantile_at_value (line 842) | fn test_extreme_quantile_at_value() {
  function random_stress (line 857) | fn random_stress() {
  type OrderedF64 (line 893) | struct OrderedF64(OrderedFloat<f64>);
  method arbitrary (line 896) | fn arbitrary(g: &mut Gen) -> Self {
  function test_entry_invalid_hashmap_key (line 903) | fn test_entry_invalid_hashmap_key() {
  function test_entry_insertion_order (line 913) | fn test_entry_insertion_order() {
  function fuzzing_test (line 971) | fn fuzzing_test(

FILE: docs/examples/tdigest.c
  type TDigestBuilder (line 10) | struct TDigestBuilder
  type TDigest (line 11) | struct TDigest
  type TDigestBuilder (line 17) | struct TDigestBuilder
  type TDigestBuilder (line 21) | struct TDigestBuilder
  type TDigestBuilder (line 24) | struct TDigestBuilder
  type TDigestBuilder (line 24) | struct TDigestBuilder
  type TDigestBuilder (line 29) | struct TDigestBuilder
  type TDigest (line 33) | struct TDigest
  type TDigestBuilder (line 34) | struct TDigestBuilder
  type TDigest (line 38) | struct TDigest
  type TDigest (line 44) | struct TDigest
  function main (line 51) | int

FILE: docs/examples/tdigest.py
  class TDigest (line 21) | class TDigest:
    class Builder (line 23) | class Builder:
      method __init__ (line 24) | def __init__(self, pointer):
      method __enter__ (line 27) | def __enter__(self):
      method __exit__ (line 31) | def __exit__(self, exc_type, exc_val, exc_tb):
      method __del__ (line 37) | def __del__(self):
      method with_size (line 41) | def with_size(size):
      method push (line 44) | def push(self, value):
      method build (line 47) | def build(self):
    method __init__ (line 53) | def __init__(self, pointer):
    method __enter__ (line 56) | def __enter__(self):
    method __exit__ (line 60) | def __exit__(self, exc_type, exc_val, exc_tb):
    method __del__ (line 66) | def __del__(self):
    method format_for_postgres (line 70) | def format_for_postgres(self):
  function test (line 78) | def test():

FILE: extension/src/accessors.rs
  function accessor_live_at (line 121) | pub fn accessor_live_at(ts: crate::raw::TimestampTz) -> AccessorLiveAt {
  function accessor_stddev (line 142) | pub fn accessor_stddev(method: default!(&str, "'sample'")) -> AccessorSt...
  function accessor_stddev_x (line 164) | pub fn accessor_stddev_x(method: default!(&str, "'sample'")) -> Accessor...
  function accessor_stddev_y (line 186) | pub fn accessor_stddev_y(method: default!(&str, "'sample'")) -> Accessor...
  function accessor_variance (line 208) | pub fn accessor_variance(method: default!(&str, "'sample'")) -> Accessor...
  function accessor_variance_x (line 230) | pub fn accessor_variance_x(method: default!(&str, "'sample'")) -> Access...
  function accessor_variance_y (line 252) | pub fn accessor_variance_y(method: default!(&str, "'sample'")) -> Access...
  function accessor_skewness (line 274) | pub fn accessor_skewness(method: default!(&str, "'sample'")) -> Accessor...
  function accessor_skewness_x (line 296) | pub fn accessor_skewness_x(method: default!(&str, "'sample'")) -> Access...
  function accessor_skewness_y (line 318) | pub fn accessor_skewness_y(method: default!(&str, "'sample'")) -> Access...
  function accessor_kurtosis (line 340) | pub fn accessor_kurtosis(method: default!(&str, "'sample'")) -> Accessor...
  function accessor_kurtosis_x (line 362) | pub fn accessor_kurtosis_x(method: default!(&str, "'sample'")) -> Access...
  function accessor_kurtosis_y (line 384) | pub fn accessor_kurtosis_y(method: default!(&str, "'sample'")) -> Access...
  function accessor_covar (line 406) | pub fn accessor_covar(method: default!(&str, "'sample'")) -> AccessorCov...
  function accessor_extrapolated_delta (line 428) | pub fn accessor_extrapolated_delta(method: &str) -> AccessorExtrapolated...
  function accessor_extrapolated_rate (line 450) | pub fn accessor_extrapolated_rate(method: &str) -> AccessorExtrapolatedR...
  function accessor_with_bounds (line 475) | pub fn accessor_with_bounds(bounds: crate::raw::tstzrange) -> AccessorWi...
  method bounds (line 503) | pub fn bounds(&self) -> Option<I64Range> {
  function accessor_unnest (line 527) | pub fn accessor_unnest() -> AccessorUnnest {
  function accessor_integral (line 546) | pub fn accessor_integral(unit: default!(&str, "'second'")) -> AccessorIn...
  function accessor_topn_count (line 579) | pub fn accessor_topn_count(count: i64) -> AccessorTopNCount {
  function accessor_max_frequency_int (line 599) | pub fn accessor_max_frequency_int(value: i64) -> AccessorMaxFrequencyInt {
  function accessor_min_frequency_int (line 619) | pub fn accessor_min_frequency_int(value: i64) -> AccessorMinFrequencyInt {
  function accessor_percentiles (line 640) | pub fn accessor_percentiles(unit: Vec<f64>) -> AccessorPercentileArray {

FILE: extension/src/accessors/tests.rs
  function one_field_works (line 12) | fn one_field_works() {
  function two_field_works (line 18) | fn two_field_works() {

FILE: extension/src/aggregate_builder_tests.rs
  type State (line 19) | type State = String;
  function transition (line 21) | fn transition(state: Option<State>, #[sql_type("text")] value: String) -...
  function finally (line 25) | fn finally(state: Option<&mut State>) -> Option<String> {
  type State (line 32) | type State = String;
  function transition (line 34) | fn transition(state: Option<State>, #[sql_type("text")] value: String) -...
  function finally (line 38) | fn finally(state: Option<&mut State>) -> Option<String> {
  function serialize (line 42) | fn serialize(state: &State) -> bytea {
  function deserialize (line 46) | fn deserialize(bytes: bytea) -> State {
  function combine (line 50) | fn combine(a: Option<&State>, b: Option<&State>) -> Option<State> {
  type State (line 57) | type State = String;
  function transition (line 59) | fn transition(state: Option<State>, #[sql_type("text")] value: String) -...
  function finally (line 63) | fn finally(state: Option<&mut State>) -> Option<String> {
  constant PARALLEL_SAFE (line 67) | const PARALLEL_SAFE: bool = true;
  function serialize (line 69) | fn serialize(state: &State) -> bytea {
  function deserialize (line 73) | fn deserialize(bytes: bytea) -> State {
  function combine (line 77) | fn combine(a: Option<&State>, b: Option<&State>) -> Option<State> {
  function test_anything_in_experimental_and_returns_first (line 89) | fn test_anything_in_experimental_and_returns_first() {
  function test_anything_has_correct_fn_names_and_def (line 107) | fn test_anything_has_correct_fn_names_and_def() {
  function test_cagg_anything_has_correct_fn_names_and_def (line 135) | fn test_cagg_anything_has_correct_fn_names_and_def() {
  function test_parallel_anything_has_correct_fn_names_and_def (line 163) | fn test_parallel_anything_has_correct_fn_names_and_def() {
  function get_aggregate_spec (line 193) | fn get_aggregate_spec(client: &mut spi::SpiClient, aggregate_name: &str)...

FILE: extension/src/aggregate_utils.rs
  function get_collation (line 6) | pub unsafe fn get_collation(fcinfo: pg_sys::FunctionCallInfo) -> Option<...
  function get_collation_or_default (line 14) | pub fn get_collation_or_default(fcinfo: pg_sys::FunctionCallInfo) -> Opt...
  function in_aggregate_context (line 22) | pub unsafe fn in_aggregate_context<T, F: FnOnce() -> T>(
  function aggregate_mctx (line 31) | pub unsafe fn aggregate_mctx(fcinfo: pg_sys::FunctionCallInfo) -> Option...

FILE: extension/src/asap.rs
  type ASAPTransState (line 16) | pub struct ASAPTransState {
    method add_point (line 66) | fn add_point(&mut self, point: TSPoint) {
  function asap_trans (line 23) | pub fn asap_trans(
  function asap_trans_internal (line 32) | pub fn asap_trans_internal(
  function asap_final (line 77) | fn asap_final(
  function asap_final_inner (line 83) | fn asap_final_inner(
  function asap_on_timevector (line 136) | pub fn asap_on_timevector(
  function test_against_reference (line 202) | fn test_against_reference() {
  function test_asap_equivalence (line 273) | fn test_asap_equivalence() {

FILE: extension/src/candlestick.rs
  method new (line 40) | pub fn new(ts: i64, open: f64, high: f64, low: f64, close: f64, volume: ...
  method from_tick (line 63) | pub fn from_tick(ts: i64, price: f64, volume: Option<f64>) -> Self {
  method add_tick_data (line 67) | pub fn add_tick_data(&mut self, ts: i64, price: f64, volume: Option<f64>) {
  method combine (line 94) | pub fn combine(&mut self, candlestick: &Candlestick) {
  method open (line 131) | pub fn open(&self) -> f64 {
  method high (line 135) | pub fn high(&self) -> f64 {
  method low (line 139) | pub fn low(&self) -> f64 {
  method close (line 143) | pub fn close(&self) -> f64 {
  method open_time (line 147) | pub fn open_time(&self) -> i64 {
  method high_time (line 151) | pub fn high_time(&self) -> i64 {
  method low_time (line 155) | pub fn low_time(&self) -> i64 {
  method close_time (line 159) | pub fn close_time(&self) -> i64 {
  method volume (line 163) | pub fn volume(&self) -> Option<f64> {
  method vwap (line 170) | pub fn vwap(&self) -> Option<f64> {
  function candlestick (line 187) | pub fn candlestick(
  function tick_data_no_vol_transition (line 209) | pub fn tick_data_no_vol_transition(
  function tick_data_transition (line 219) | pub fn tick_data_transition(
  function tick_data_transition_inner (line 229) | pub fn tick_data_transition_inner(
  function candlestick_rollup_trans (line 257) | pub fn candlestick_rollup_trans(
  function candlestick_rollup_trans_inner (line 265) | pub fn candlestick_rollup_trans_inner(
  function candlestick_final (line 284) | pub fn candlestick_final(state: Internal, fcinfo: pg_sys::FunctionCallIn...
  function candlestick_final_inner (line 288) | pub fn candlestick_final_inner(
  function candlestick_combine (line 304) | pub fn candlestick_combine(
  function candlestick_combine_inner (line 312) | pub fn candlestick_combine_inner(
  function candlestick_serialize (line 331) | pub fn candlestick_serialize(state: Internal) -> bytea {
  function candlestick_deserialize (line 339) | pub fn candlestick_deserialize(bytes: bytea, _internal: Internal) -> Opt...
  function candlestick_deserialize_inner (line 343) | pub fn candlestick_deserialize_inner(bytes: bytea) -> Inner<Candlestick> {
  function arrow_open (line 399) | pub fn arrow_open(candlestick: Option<Candlestick>, _accessor: AccessorO...
  function open (line 404) | pub fn open(candlestick: Option<Candlestick>) -> Option<f64> {
  function arrow_high (line 410) | pub fn arrow_high(candlestick: Option<Candlestick>, _accessor: AccessorH...
  function high (line 415) | pub fn high(candlestick: Option<Candlestick>) -> Option<f64> {
  function arrow_low (line 421) | pub fn arrow_low(candlestick: Option<Candlestick>, _accessor: AccessorLo...
  function low (line 426) | pub fn low(candlestick: Option<Candlestick>) -> Option<f64> {
  function arrow_close (line 432) | pub fn arrow_close(candlestick: Option<Candlestick>, _accessor: Accessor...
  function close (line 437) | pub fn close(candlestick: Option<Candlestick>) -> Option<f64> {
  function arrow_open_time (line 443) | pub fn arrow_open_time(
  function open_time (line 451) | pub fn open_time(candlestick: Option<Candlestick>) -> Option<crate::raw:...
  function arrow_high_time (line 457) | pub fn arrow_high_time(
  function high_time (line 465) | pub fn high_time(candlestick: Option<Candlestick>) -> Option<crate::raw:...
  function arrow_low_time (line 471) | pub fn arrow_low_time(
  function low_time (line 479) | pub fn low_time(candlestick: Option<Candlestick>) -> Option<crate::raw::...
  function arrow_close_time (line 485) | pub fn arrow_close_time(
  function close_time (line 493) | pub fn close_time(candlestick: Option<Candlestick>) -> Option<crate::raw...
  function volume (line 498) | pub fn volume(candlestick: Option<Candlestick>) -> Option<f64> {
  function vwap (line 506) | pub fn vwap(candlestick: Option<Candlestick>) -> Option<f64> {
  function candlestick_single_point (line 543) | fn candlestick_single_point() {
  function candlestick_agg_single_point (line 567) | fn candlestick_agg_single_point() {
  function candlestick_accessors (line 591) | fn candlestick_accessors() {
  function candlestick_agg_accessors (line 646) | fn candlestick_agg_accessors() {
  function candlestick_agg_extreme_values (line 684) | fn candlestick_agg_extreme_values() {
  function candlestick_null_inputs (line 735) | fn candlestick_null_inputs() {
  function candlestick_agg_null_inputs (line 753) | fn candlestick_agg_null_inputs() {
  function candlestick_as_constructor (line 787) | fn candlestick_as_constructor() {
  function candlestick_agg_constant (line 831) | fn candlestick_agg_constant() {
  function candlestick_agg_strictly_increasing (line 861) | fn candlestick_agg_strictly_increasing() {
  function candlestick_agg_strictly_decreasing (line 891) | fn candlestick_agg_strictly_decreasing() {
  function candlestick_agg_oscillating (line 921) | fn candlestick_agg_oscillating() {
  function candlestick_rollup (line 958) | fn candlestick_rollup() {
  function candlestick_agg_rollup (line 989) | fn candlestick_agg_rollup() {
  function candlestick_byte_io (line 1030) | fn candlestick_byte_io() {

FILE: extension/src/counter_agg.rs
  type PgTypeHackStatsSummary2D (line 37) | type PgTypeHackStatsSummary2D = StatsSummary2D<f64>;
  method to_internal_counter_summary (line 58) | pub fn to_internal_counter_summary(&self) -> MetricSummary {
  method from_internal_counter_summary (line 71) | pub fn from_internal_counter_summary(st: MetricSummary) -> Self {
  method interpolate (line 89) | fn interpolate(
  type CounterSummaryTransState (line 154) | pub struct CounterSummaryTransState {
    method new (line 166) | fn new() -> Self {
    method push_point (line 174) | fn push_point(&mut self, value: TSPoint) {
    method combine_points (line 182) | fn combine_points(&mut self) {
    method push_summary (line 203) | fn push_summary(&mut self, other: &CounterSummaryTransState) {
    method combine_summaries (line 210) | fn combine_summaries(&mut self) {
  function counter_summary_trans_serialize (line 230) | pub fn counter_summary_trans_serialize(state: Internal) -> bytea {
  function counter_summary_trans_deserialize (line 238) | pub fn counter_summary_trans_deserialize(bytes: bytea, _internal: Intern...
  function counter_summary_trans_deserialize_inner (line 241) | pub fn counter_summary_trans_deserialize_inner(bytes: bytea) -> Inner<Co...
  function counter_agg_trans (line 247) | pub fn counter_agg_trans(
  function counter_agg_trans_inner (line 256) | pub fn counter_agg_trans_inner(
  function counter_agg_trans_no_bounds (line 289) | pub fn counter_agg_trans_no_bounds(
  function counter_agg_summary_trans (line 299) | pub fn counter_agg_summary_trans(
  function counter_agg_summary_trans_inner (line 306) | pub fn counter_agg_summary_trans_inner(
  function counter_agg_combine (line 332) | pub fn counter_agg_combine(
  function counter_agg_combine_inner (line 339) | pub fn counter_agg_combine_inner(
  function counter_agg_final (line 372) | fn counter_agg_final(state: Internal, fcinfo: pg_sys::FunctionCallInfo) ...
  function counter_agg_final_inner (line 375) | fn counter_agg_final_inner(
  function arrow_counter_agg_delta (line 472) | pub fn arrow_counter_agg_delta(sketch: CounterSummary, _accessor: Access...
  function counter_agg_delta (line 477) | fn counter_agg_delta(summary: CounterSummary) -> f64 {
  function arrow_counter_agg_rate (line 483) | pub fn arrow_counter_agg_rate(sketch: CounterSummary, _accessor: Accesso...
  function counter_agg_rate (line 488) | fn counter_agg_rate(summary: CounterSummary) -> Option<f64> {
  function arrow_counter_agg_time_delta (line 494) | pub fn arrow_counter_agg_time_delta(sketch: CounterSummary, _accessor: A...
  function counter_agg_time_delta (line 499) | fn counter_agg_time_delta(summary: CounterSummary) -> f64 {
  function arrow_counter_agg_irate_left (line 505) | pub fn arrow_counter_agg_irate_left(
  function counter_agg_irate_left (line 513) | fn counter_agg_irate_left(summary: CounterSummary) -> Option<f64> {
  function arrow_counter_agg_irate_right (line 519) | pub fn arrow_counter_agg_irate_right(
  function counter_agg_irate_right (line 527) | fn counter_agg_irate_right(summary: CounterSummary) -> Option<f64> {
  function arrow_counter_agg_idelta_left (line 533) | pub fn arrow_counter_agg_idelta_left(sketch: CounterSummary, _accessor: ...
  function counter_agg_idelta_left (line 538) | fn counter_agg_idelta_left(summary: CounterSummary) -> f64 {
  function arrow_counter_agg_idelta_right (line 544) | pub fn arrow_counter_agg_idelta_right(
  function counter_agg_idelta_right (line 552) | fn counter_agg_idelta_right(summary: CounterSummary) -> f64 {
  function arrow_counter_agg_with_bounds (line 558) | pub fn arrow_counter_agg_with_bounds(
  function counter_agg_with_bounds (line 568) | fn counter_agg_with_bounds(summary: CounterSummary, bounds: tstzrange) -...
  function arrow_counter_agg_extrapolated_delta (line 591) | pub fn arrow_counter_agg_extrapolated_delta(
  function counter_agg_extrapolated_delta (line 599) | fn counter_agg_extrapolated_delta(summary: CounterSummary, method: &str)...
  function counter_agg_interpolated_delta (line 609) | fn counter_agg_interpolated_delta(
  function arrow_counter_interpolated_delta (line 625) | pub fn arrow_counter_interpolated_delta(
  function arrow_counter_agg_extrapolated_rate (line 651) | pub fn arrow_counter_agg_extrapolated_rate(
  function counter_agg_extrapolated_rate (line 659) | fn counter_agg_extrapolated_rate(summary: CounterSummary, method: &str) ...
  function counter_agg_interpolated_rate (line 669) | fn counter_agg_interpolated_rate(
  function arrow_counter_interpolated_rate (line 685) | pub fn arrow_counter_interpolated_rate(
  function arrow_counter_agg_num_elements (line 711) | pub fn arrow_counter_agg_num_elements(
  function counter_agg_num_elements (line 719) | fn counter_agg_num_elements(summary: CounterSummary) -> i64 {
  function arrow_counter_agg_num_changes (line 725) | pub fn arrow_counter_agg_num_changes(sketch: CounterSummary, _accessor: ...
  function counter_agg_num_changes (line 730) | fn counter_agg_num_changes(summary: CounterSummary) -> i64 {
  function arrow_counter_agg_num_resets (line 736) | pub fn arrow_counter_agg_num_resets(sketch: CounterSummary, _accessor: A...
  function counter_agg_num_resets (line 741) | fn counter_agg_num_resets(summary: CounterSummary) -> i64 {
  function arrow_counter_agg_slope (line 747) | pub fn arrow_counter_agg_slope(sketch: CounterSummary, _accessor: Access...
  function counter_agg_slope (line 752) | fn counter_agg_slope(summary: CounterSummary) -> Option<f64> {
  function arrow_counter_agg_intercept (line 758) | pub fn arrow_counter_agg_intercept(
  function counter_agg_intercept (line 766) | fn counter_agg_intercept(summary: CounterSummary) -> Option<f64> {
  function arrow_counter_agg_corr (line 772) | pub fn arrow_counter_agg_corr(sketch: CounterSummary, _accessor: Accesso...
  function counter_agg_corr (line 777) | fn counter_agg_corr(summary: CounterSummary) -> Option<f64> {
  function arrow_counter_agg_zero_time (line 783) | pub fn arrow_counter_agg_zero_time(
  function counter_agg_counter_zero_time (line 791) | fn counter_agg_counter_zero_time(summary: CounterSummary) -> Option<crat...
  function arrow_counter_agg_first_val (line 797) | pub fn arrow_counter_agg_first_val(sketch: CounterSummary, _accessor: Ac...
  function counter_agg_first_val (line 802) | fn counter_agg_first_val(summary: CounterSummary) -> f64 {
  function arrow_counter_agg_last_val (line 808) | pub fn arrow_counter_agg_last_val(sketch: CounterSummary, _accessor: Acc...
  function counter_agg_last_val (line 813) | fn counter_agg_last_val(summary: CounterSummary) -> f64 {
  function arrow_counter_agg_first_time (line 819) | pub fn arrow_counter_agg_first_time(
  function counter_agg_first_time (line 827) | fn counter_agg_first_time(summary: CounterSummary) -> crate::raw::Timest...
  function arrow_counter_agg_last_time (line 833) | pub fn arrow_counter_agg_last_time(
  function counter_agg_last_time (line 841) | fn counter_agg_last_time(summary: CounterSummary) -> crate::raw::Timesta...
  type Method (line 849) | pub enum Method {
    method as_str (line 854) | pub fn as_str(&self) -> &'static str {
  function method_kind (line 862) | pub fn method_kind(method: &str) -> Method {
  function as_method (line 869) | pub fn as_method(method: &str) -> Option<Method> {
  function assert_close_enough (line 912) | fn assert_close_enough(p1: &MetricSummary, p2: &MetricSummary) {
  function test_counter_aggregate (line 928) | fn test_counter_aggregate() {
  function test_counter_io (line 1046) | fn test_counter_io() {
  function test_counter_byte_io (line 1124) | fn test_counter_byte_io() {
  function delta_after_counter_decrease (line 1195) | fn delta_after_counter_decrease() {
  function delta_after_counter_increase (line 1206) | fn delta_after_counter_increase() {
  function delta_after_counter_decrease_then_increase_to_same_value (line 1215) | fn delta_after_counter_decrease_then_increase_to_same_value() {
  function delta_after_counter_increase_then_decrease_to_same_value (line 1226) | fn delta_after_counter_increase_then_decrease_to_same_value() {
  function idelta_left_after_counter_decrease (line 1237) | fn idelta_left_after_counter_decrease() {
  function idelta_left_after_counter_increase (line 1246) | fn idelta_left_after_counter_increase() {
  function idelta_left_after_counter_increase_then_decrease_to_same_value (line 1255) | fn idelta_left_after_counter_increase_then_decrease_to_same_value() {
  function idelta_left_after_counter_decrease_then_increase_to_same_value (line 1264) | fn idelta_left_after_counter_decrease_then_increase_to_same_value() {
  function idelta_right_after_counter_decrease (line 1274) | fn idelta_right_after_counter_decrease() {
  function idelta_right_after_counter_increase (line 1283) | fn idelta_right_after_counter_increase() {
  function idelta_right_after_counter_increase_then_decrease_to_same_value (line 1292) | fn idelta_right_after_counter_increase_then_decrease_to_same_value() {
  function idelta_right_after_counter_decrease_then_increase_to_same_value (line 1301) | fn idelta_right_after_counter_decrease_then_increase_to_same_value() {
  function counter_agg_interpolation (line 1310) | fn counter_agg_interpolation() {
  function interpolated_delta_with_aligned_point (line 1485) | fn interpolated_delta_with_aligned_point() {
  function irate_left_arrow_match (line 1540) | fn irate_left_arrow_match() {
  function irate_right_arrow_match (line 1559) | fn irate_right_arrow_match() {
  function idelta_left_arrow_match (line 1578) | fn idelta_left_arrow_match() {
  function idelta_right_arrow_match (line 1597) | fn idelta_right_arrow_match() {
  function num_resets_arrow_match (line 1616) | fn num_resets_arrow_match() {
  function first_and_last_val (line 1635) | fn first_and_last_val() {
  function first_and_last_val_arrow_match (line 1664) | fn first_and_last_val_arrow_match() {
  function first_and_last_time (line 1695) | fn first_and_last_time() {
  function first_and_last_time_arrow_match (line 1725) | fn first_and_last_time_arrow_match() {
  function decrease (line 1766) | pub fn decrease(client: &mut pgrx::spi::SpiClient) {
  function increase (line 1786) | pub fn increase(client: &mut pgrx::spi::SpiClient) {
  function decrease_then_increase_to_same_value (line 1806) | pub fn decrease_then_increase_to_same_value(client: &mut pgrx::spi::SpiC...
  function increase_then_decrease_to_same_value (line 1827) | pub fn increase_then_decrease_to_same_value(client: &mut pgrx::spi::SpiC...
  function make_test_table (line 1848) | pub fn make_test_table(client: &mut pgrx::spi::SpiClient, name: &str) {

FILE: extension/src/counter_agg/accessors.rs
  function counter_interpolated_rate_accessor (line 24) | fn counter_interpolated_rate_accessor(
  function counter_interpolated_delta_accessor (line 65) | fn counter_interpolated_delta_accessor(

FILE: extension/src/countminsketch.rs
  function new (line 28) | fn new(width: u32, depth: u32, counters: Vec<i64>) -> Self {
  function to_internal_countminsketch (line 39) | pub fn to_internal_countminsketch(&self) -> CountMinSketchInternal {
  function from_internal_countminsketch (line 64) | pub fn from_internal_countminsketch(sketch: &mut CountMinSketchInternal)...
  type State (line 80) | type State = CountMinSketchInternal;
  function transition (line 82) | fn transition(
  function finally (line 102) | fn finally(state: Option<&mut State>) -> Option<CountMinSketch<'static>> {
  constant PARALLEL_SAFE (line 106) | const PARALLEL_SAFE: bool = true;
  function serialize (line 108) | fn serialize(state: &mut State) -> bytea {
  function deserialize (line 112) | fn deserialize(bytes: bytea) -> State {
  function combine (line 116) | fn combine(state1: Option<&State>, state2: Option<&State>) -> Option<Sta...
  function approx_count (line 130) | pub fn approx_count<'a>(item: String, aggregate: Option<CountMinSketch<'...
  function test_countminsketch (line 141) | fn test_countminsketch() {
  function test_countminsketch_combine (line 205) | fn test_countminsketch_combine() {
  function countminsketch_io_test (line 233) | fn countminsketch_io_test() {
  function test_cms_null_input_yields_null_output (line 267) | fn test_cms_null_input_yields_null_output() {
  function test_approx_count_null_input_yields_null_output (line 284) | fn test_approx_count_null_input_yields_null_output() {

FILE: extension/src/datum_utils.rs
  function deep_copy_datum (line 19) | pub(crate) unsafe fn deep_copy_datum(datum: Datum, typoid: Oid) -> Datum {
  function free_datum (line 35) | pub(crate) unsafe fn free_datum(datum: Datum, typoid: Oid) {
  function ts_interval_sum_to_ms (line 44) | pub fn ts_interval_sum_to_ms(
  function interval_to_ms (line 63) | pub fn interval_to_ms(ref_time: &crate::raw::TimestampTz, interval: &cra...
  type TextSerializableDatumWriter (line 67) | pub struct TextSerializableDatumWriter {
    method from_oid (line 72) | pub fn from_oid(typoid: Oid) -> Self {
    method make_serializable (line 85) | pub fn make_serializable(&mut self, datum: Datum) -> TextSerializeable...
  type DatumFromSerializedTextReader (line 90) | pub struct DatumFromSerializedTextReader {
    method from_oid (line 96) | pub fn from_oid(typoid: Oid) -> Self {
    method read_datum (line 111) | pub fn read_datum(&mut self, datum_str: &str) -> Datum {
  type TextSerializeableDatum (line 118) | pub struct TextSerializeableDatum(Datum, *mut pg_sys::FmgrInfo);
  method serialize (line 121) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  type DatumHashBuilder (line 131) | pub(crate) struct DatumHashBuilder {
    method from_type_id (line 138) | pub(crate) unsafe fn from_type_id(type_id: pg_sys::Oid, collation: Opt...
    method from_type_cache_entry (line 144) | pub(crate) unsafe fn from_type_cache_entry(
    method deserialize (line 258) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  method clone (line 180) | fn clone(&self) -> Self {
  type Hasher (line 186) | type Hasher = DatumHashBuilder;
  method build_hasher (line 188) | fn build_hasher(&self) -> Self::Hasher {
  method finish (line 198) | fn finish(&self) -> u64 {
  method write (line 214) | fn write(&mut self, bytes: &[u8]) {
  method write_usize (line 224) | fn write_usize(&mut self, i: usize) {
  method eq (line 236) | fn eq(&self, other: &Self) -> bool {
  method serialize (line 244) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function div_round_up (line 271) | fn div_round_up(numerator: usize, divisor: usize) -> usize {
  function round_to_multiple (line 276) | fn round_to_multiple(value: usize, multiple: usize) -> usize {
  function padded_va_len (line 281) | fn padded_va_len(ptr: *const pg_sys::varlena) -> usize {
  method serialize (line 300) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function deserialize (line 316) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  function from (line 352) | fn from(input: (Oid, Vec<Datum>)) -> Self {
  type DatumStoreIterator (line 443) | pub enum DatumStoreIterator<'a, 'b> {
  type Item (line 459) | type Item = Datum;
  method next (line 461) | fn next(&mut self) -> Option<Self::Item> {
  function iter (line 495) | pub fn iter<'b>(&'b self) -> DatumStoreIterator<'a, 'b> {
  function into_anyelement_iter (line 529) | pub fn into_anyelement_iter(self) -> impl Iterator<Item = AnyElement> + ...
  type DatumStoreIntoIterator (line 538) | pub enum DatumStoreIntoIterator<'a> {
  type Item (line 557) | type Item = Datum;
  method next (line 559) | fn next(&mut self) -> Option<Self::Item> {
  type Item (line 609) | type Item = Datum;
  type IntoIter (line 610) | type IntoIter = DatumStoreIntoIterator<'a>;
  method into_iter (line 612) | fn into_iter(self) -> Self::IntoIter {
  type State (line 664) | type State = (Oid, Vec<Datum>);
  function transition (line 666) | fn transition(
  function finally (line 682) | fn finally(state: Option<&mut State>) -> Option<DatumStoreTester<'static...
  function test_value_datum_store (line 695) | fn test_value_datum_store() {
  function test_varlena_datum_store (line 706) | fn test_varlena_datum_store() {
  function test_byref_datum_store (line 717) | fn test_byref_datum_store() {

FILE: extension/src/duration.rs
  type DurationUnit (line 9) | pub enum DurationUnit {
    method microseconds (line 19) | pub fn microseconds(self) -> u32 {
    method convert_unit (line 30) | pub fn convert_unit(self, amount: f64, to: Self) -> f64 {
    method from_str (line 36) | pub fn from_str(s: &str) -> Option<Self> {
    method fmt (line 52) | fn fmt(&self, f: &mut Formatter) -> fmt::Result {
  function convert_unit (line 68) | fn convert_unit() {
  function parse_unit (line 76) | fn parse_unit() {

FILE: extension/src/frequency.rs
  constant DEFAULT_ZETA_SKEW (line 42) | const DEFAULT_ZETA_SKEW: f64 = 1.1;
  function zeta_eq_n (line 45) | fn zeta_eq_n(skew: f64, n: u64) -> f64 {
  function zeta_le_n (line 49) | fn zeta_le_n(skew: f64, n: u64) -> f64 {
  type SpaceSavingEntry (line 53) | struct SpaceSavingEntry {
    method clone (line 60) | fn clone(&self, typoid: Oid) -> SpaceSavingEntry {
  type SpaceSavingTransState (line 69) | pub struct SpaceSavingTransState {
    method deserialize (line 138) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    method max_size_for_freq (line 194) | fn max_size_for_freq(min_freq: f64) -> u32 {
    method freq_agg_from_type_id (line 198) | fn freq_agg_from_type_id(min_freq: f64, typ: pg_sys::Oid, collation: O...
    method mcv_agg_from_type_id (line 209) | fn mcv_agg_from_type_id(
    method ingest_aggregate_data (line 236) | fn ingest_aggregate_data(
    method ingest_aggregate_ints (line 257) | fn ingest_aggregate_ints(
    method type_oid (line 279) | fn type_oid(&self) -> Oid {
    method add (line 283) | fn add(&mut self, element: PgAnyElement) {
    method move_left (line 319) | fn move_left(&mut self, i: usize) {
    method update_map_index (line 334) | fn update_map_index(&mut self, i: usize) {
    method update_all_map_indices (line 343) | fn update_all_map_indices(&mut self) {
    method combine (line 349) | fn combine(one: &SpaceSavingTransState, two: &SpaceSavingTransState) -...
    method from (line 459) | fn from(data_in: (&SpaceSavingAggregate<'input>, &pg_sys::FunctionCall...
    method from (line 535) | fn from(
    method from (line 609) | fn from(data_in: (&SpaceSavingTextAggregate<'input>, &pg_sys::Function...
  method clone (line 79) | fn clone(&self) -> Self {
  method serialize (line 112) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function from (line 430) | fn from(trans: &SpaceSavingTransState) -> Self {
  function from (line 502) | fn from(trans: &SpaceSavingTransState) -> Self {
  function from (line 579) | fn from(trans: &SpaceSavingTransState) -> Self {
  function mcv_agg_trans (line 635) | pub fn mcv_agg_trans(
  function mcv_agg_bigint_trans (line 645) | pub fn mcv_agg_bigint_trans(
  function mcv_agg_text_trans (line 655) | pub fn mcv_agg_text_trans(
  function mcv_agg_with_skew_trans (line 665) | pub fn mcv_agg_with_skew_trans(
  function mcv_agg_with_skew_bigint_trans (line 684) | pub fn mcv_agg_with_skew_bigint_trans(
  function mcv_agg_with_skew_text_trans (line 710) | pub fn mcv_agg_with_skew_text_trans(
  function freq_agg_trans (line 737) | pub fn freq_agg_trans(
  function freq_agg_bigint_trans (line 757) | pub fn freq_agg_bigint_trans(
  function freq_agg_text_trans (line 773) | pub fn freq_agg_text_trans(
  function space_saving_trans (line 789) | pub fn space_saving_trans<F>(
  function rollup_agg_trans (line 820) | pub fn rollup_agg_trans<'input>(
  function rollup_agg_trans_inner (line 832) | pub fn rollup_agg_trans_inner(
  function rollup_agg_bigint_trans (line 850) | pub fn rollup_agg_bigint_trans<'input>(
  function rollup_agg_bigint_trans_inner (line 862) | pub fn rollup_agg_bigint_trans_inner(
  function rollup_agg_text_trans (line 880) | pub fn rollup_agg_text_trans<'input>(
  function rollup_agg_text_trans_inner (line 892) | pub fn rollup_agg_text_trans_inner(
  function space_saving_combine (line 910) | pub fn space_saving_combine(
  function space_saving_combine_inner (line 917) | pub fn space_saving_combine_inner(
  function space_saving_final (line 933) | fn space_saving_final(
  function space_saving_bigint_final (line 942) | fn space_saving_bigint_final(
  function space_saving_text_final (line 951) | fn space_saving_text_final(
  function space_saving_serialize (line 960) | fn space_saving_serialize(state: Internal) -> bytea {
  function space_saving_deserialize (line 966) | pub fn space_saving_deserialize(bytes: bytea, _internal: Internal) -> Op...
  function freq_iter (line 1260) | pub fn freq_iter<'a>(
  function freq_bigint_iter (line 1291) | pub fn freq_bigint_iter<'a>(
  function arrow_freq_bigint_iter (line 1314) | pub fn arrow_freq_bigint_iter<'a>(
  function freq_text_iter (line 1329) | pub fn freq_text_iter<'a>(
  function arrow_freq_text_iter (line 1353) | pub fn arrow_freq_text_iter<'a>(
  function validate_topn_for_mcv_agg (line 1367) | fn validate_topn_for_mcv_agg(
  function topn (line 1397) | pub fn topn(
  function default_topn (line 1433) | pub fn default_topn(
  function topn_bigint (line 1445) | pub fn topn_bigint(agg: SpaceSavingBigIntAggregate<'_>, n: i32) -> SetOf...
  function arrow_topn_bigint (line 1466) | pub fn arrow_topn_bigint<'a>(
  function default_topn_bigint (line 1474) | pub fn default_topn_bigint(agg: SpaceSavingBigIntAggregate<'_>) -> SetOf...
  function arrow_default_topn_bigint (line 1484) | pub fn arrow_default_topn_bigint<'a>(
  function topn_text (line 1492) | pub fn topn_text(agg: SpaceSavingTextAggregate<'_>, n: i32) -> SetOfIter...
  function arrow_topn_text (line 1516) | pub fn arrow_topn_text<'a>(
  function default_topn_text (line 1524) | pub fn default_topn_text(agg: SpaceSavingTextAggregate<'_>) -> SetOfIter...
  function arrow_default_topn_text (line 1534) | pub fn arrow_default_topn_text<'a>(
  function max_frequency (line 1542) | pub fn max_frequency(agg: SpaceSavingAggregate<'_>, value: AnyElement) -...
  function min_frequency (line 1555) | pub fn min_frequency(agg: SpaceSavingAggregate<'_>, value: AnyElement) -...
  function max_bigint_frequency (line 1570) | pub fn max_bigint_frequency(agg: SpaceSavingBigIntAggregate<'_>, value: ...
  function arrow_max_bigint_frequency (line 1579) | pub fn arrow_max_bigint_frequency<'a>(
  function min_bigint_frequency (line 1587) | pub fn min_bigint_frequency(agg: SpaceSavingBigIntAggregate<'_>, value: ...
  function arrow_min_bigint_frequency (line 1598) | pub fn arrow_min_bigint_frequency<'a>(
  function max_text_frequency (line 1607) | pub fn max_text_frequency(agg: SpaceSavingTextAggregate<'_>, value: text...
  function min_text_frequency (line 1621) | pub fn min_text_frequency(agg: SpaceSavingTextAggregate<'_>, value: text...
  type TopNIterator (line 1635) | struct TopNIterator<Input, InputIterator: std::iter::Iterator<Item = Inp...
  function new (line 1645) | fn new(
  type Item (line 1666) | type Item = Input;
  method next (line 1667) | fn next(&mut self) -> Option<Self::Item> {
  function varlena_to_string (line 1682) | unsafe fn varlena_to_string(vl: *const pg_sys::varlena) -> String {
  function test_freq_aggregate (line 1700) | fn test_freq_aggregate() {
  function test_topn_aggregate (line 1746) | fn test_topn_aggregate() {
  function explicit_aggregate_test (line 1794) | fn explicit_aggregate_test() {
  function setup_with_test_table (line 2007) | fn setup_with_test_table(client: &mut pgrx::spi::SpiClient) {
  function test_topn (line 2054) | fn test_topn() {
  function topn_on_underskewed_mcv_agg (line 2123) | fn topn_on_underskewed_mcv_agg() {
  function topn_high_n_on_mcv_agg (line 2138) | fn topn_high_n_on_mcv_agg() {
  function topn_requires_n_for_freq_agg (line 2153) | fn topn_requires_n_for_freq_agg() {
  function test_into_values (line 2171) | fn test_into_values() {
  function test_frequency_getters (line 2234) | fn test_frequency_getters() {
  function test_rollups (line 2285) | fn test_rollups() {
  function test_freq_agg_invariant (line 2358) | fn test_freq_agg_invariant() {
  function test_freq_agg_rollup_maintains_invariant (line 2393) | fn test_freq_agg_rollup_maintains_invariant() {
  function test_mcv_agg_invariant (line 2442) | fn test_mcv_agg_invariant() {

FILE: extension/src/gauge_agg.rs
  type FlatSummary (line 29) | pub struct FlatSummary {
  method interpolate (line 54) | pub(super) fn interpolate(
  type GaugeSummaryTransState (line 118) | struct GaugeSummaryTransState {
    method new (line 130) | fn new() -> Self {
    method push_point (line 138) | fn push_point(&mut self, value: TSPoint) {
    method combine_points (line 142) | fn combine_points(&mut self) {
    method push_summary (line 163) | fn push_summary(&mut self, other: &Self) {
    method combine_summaries (line 170) | fn combine_summaries(&mut self) {
  function gauge_summary_trans_serialize (line 190) | fn gauge_summary_trans_serialize(state: Internal) -> bytea {
  function gauge_summary_trans_deserialize (line 198) | fn gauge_summary_trans_deserialize(bytes: bytea, _internal: Internal) ->...
  function gauge_summary_trans_deserialize_inner (line 201) | fn gauge_summary_trans_deserialize_inner(bytes: bytea) -> Inner<GaugeSum...
  function gauge_agg_trans (line 207) | fn gauge_agg_trans(
  function gauge_agg_trans_inner (line 216) | fn gauge_agg_trans_inner(
  function gauge_agg_trans_no_bounds (line 249) | fn gauge_agg_trans_no_bounds(
  function gauge_agg_summary_trans (line 259) | fn gauge_agg_summary_trans(
  function gauge_agg_summary_trans_inner (line 266) | fn gauge_agg_summary_trans_inner(
  function gauge_agg_combine (line 288) | fn gauge_agg_combine(
  function gauge_agg_combine_inner (line 295) | fn gauge_agg_combine_inner(
  function gauge_agg_final (line 328) | fn gauge_agg_final(state: Internal, fcinfo: pg_sys::FunctionCallInfo) ->...
  function gauge_agg_final_inner (line 331) | fn gauge_agg_final_inner(
  function arrow_delta (line 430) | fn arrow_delta(sketch: GaugeSummary, _accessor: AccessorDelta) -> f64 {
  function delta (line 435) | fn delta(summary: GaugeSummary) -> f64 {
  function arrow_gauge_agg_rate (line 441) | fn arrow_gauge_agg_rate(sketch: GaugeSummary, _accessor: AccessorRate) -...
  function rate (line 446) | fn rate(summary: GaugeSummary) -> Option<f64> {
  function arrow_time_delta (line 452) | fn arrow_time_delta(sketch: GaugeSummary, _accessor: AccessorTimeDelta) ...
  function time_delta (line 457) | fn time_delta(summary: GaugeSummary) -> f64 {
  function arrow_irate_left (line 463) | fn arrow_irate_left(sketch: GaugeSummary, _accessor: AccessorIrateLeft) ...
  function irate_left (line 468) | fn irate_left(summary: GaugeSummary) -> Option<f64> {
  function arrow_irate_right (line 474) | fn arrow_irate_right(sketch: GaugeSummary, _accessor: AccessorIrateRight...
  function irate_right (line 479) | fn irate_right(summary: GaugeSummary) -> Option<f64> {
  function arrow_idelta_left (line 485) | fn arrow_idelta_left(sketch: GaugeSummary, _accessor: AccessorIdeltaLeft...
  function idelta_left (line 490) | fn idelta_left(summary: GaugeSummary) -> f64 {
  function arrow_idelta_right (line 496) | fn arrow_idelta_right(sketch: GaugeSummary, _accessor: AccessorIdeltaRig...
  function idelta_right (line 501) | fn idelta_right(summary: GaugeSummary) -> f64 {
  function arrow_with_bounds (line 507) | fn arrow_with_bounds(sketch: GaugeSummary, accessor: AccessorWithBounds)...
  function with_bounds (line 514) | fn with_bounds(summary: GaugeSummary, bounds: tstzrange) -> GaugeSummary {
  function arrow_extrapolated_delta (line 526) | fn arrow_extrapolated_delta(
  function extrapolated_delta (line 534) | fn extrapolated_delta(summary: GaugeSummary) -> Option<f64> {
  function interpolated_delta (line 539) | fn interpolated_delta(
  function arrow_extrapolated_rate (line 552) | fn arrow_extrapolated_rate(
  function extrapolated_rate (line 560) | fn extrapolated_rate(summary: GaugeSummary) -> Option<f64> {
  function interpolated_rate (line 565) | fn interpolated_rate(
  function arrow_num_elements (line 578) | fn arrow_num_elements(sketch: GaugeSummary, _accessor: AccessorNumElemen...
  function num_elements (line 583) | fn num_elements(summary: GaugeSummary) -> i64 {
  function arrow_num_changes (line 589) | fn arrow_num_changes(sketch: GaugeSummary, _accessor: AccessorNumChanges...
  function num_changes (line 594) | fn num_changes(summary: GaugeSummary) -> i64 {
  function arrow_slope (line 600) | fn arrow_slope(sketch: GaugeSummary, _accessor: AccessorSlope) -> Option...
  function slope (line 605) | fn slope(summary: GaugeSummary) -> Option<f64> {
  function arrow_intercept (line 611) | fn arrow_intercept(sketch: GaugeSummary, _accessor: AccessorIntercept) -...
  function intercept (line 616) | fn intercept(summary: GaugeSummary) -> Option<f64> {
  function arrow_corr (line 622) | fn arrow_corr(sketch: GaugeSummary, _accessor: AccessorCorr) -> Option<f...
  function corr (line 627) | fn corr(summary: GaugeSummary) -> Option<f64> {
  function arrow_zero_time (line 633) | fn arrow_zero_time(
  function gauge_zero_time (line 641) | fn gauge_zero_time(summary: GaugeSummary) -> Option<crate::raw::Timestam...
  method from (line 646) | fn from(pg: GaugeSummary) -> Self {
  method from (line 662) | fn from(internal: MetricSummary) -> Self {
  function round_trip (line 703) | fn round_trip() {
  function delta_after_gauge_decrease (line 779) | fn delta_after_gauge_decrease() {
  function delta_after_gauge_increase (line 788) | fn delta_after_gauge_increase() {
  function delta_after_gauge_decrease_then_increase_to_same_value (line 797) | fn delta_after_gauge_decrease_then_increase_to_same_value() {
  function delta_after_gauge_increase_then_decrease_to_same_value (line 806) | fn delta_after_gauge_increase_then_decrease_to_same_value() {
  function idelta_left_after_gauge_decrease (line 815) | fn idelta_left_after_gauge_decrease() {
  function idelta_left_after_gauge_increase (line 824) | fn idelta_left_after_gauge_increase() {
  function idelta_left_after_gauge_increase_then_decrease_to_same_value (line 833) | fn idelta_left_after_gauge_increase_then_decrease_to_same_value() {
  function idelta_left_after_gauge_decrease_then_increase_to_same_value (line 842) | fn idelta_left_after_gauge_decrease_then_increase_to_same_value() {
  function idelta_right_after_gauge_decrease (line 851) | fn idelta_right_after_gauge_decrease() {
  function idelta_right_after_gauge_increase (line 860) | fn idelta_right_after_gauge_increase() {
  function idelta_right_after_gauge_increase_then_decrease_to_same_value (line 869) | fn idelta_right_after_gauge_increase_then_decrease_to_same_value() {
  function idelta_right_after_gauge_decrease_then_increase_to_same_value (line 878) | fn idelta_right_after_gauge_decrease_then_increase_to_same_value() {
  function assert_close_enough (line 888) | fn assert_close_enough(p1: &MetricSummary, p2: &MetricSummary) {
  function rollup (line 905) | fn rollup() {
  function gauge_agg_interpolation (line 952) | fn gauge_agg_interpolation() {
  function guage_agg_interpolated_delta_with_aligned_point (line 1042) | fn guage_agg_interpolated_delta_with_aligned_point() {
  function no_results_on_null_input (line 1091) | fn no_results_on_null_input() {

FILE: extension/src/heartbeat_agg.rs
  constant BUFFER_SIZE (line 27) | const BUFFER_SIZE: usize = 1000;
  type HeartbeatTransState (line 31) | pub struct HeartbeatTransState {
    method new (line 41) | pub fn new(start: i64, end: i64, interval: i64) -> Self {
    method insert (line 53) | pub fn insert(&mut self, time: i64) {
    method process_batch (line 61) | pub fn process_batch(&mut self) {
    method extend_covered_interval (line 96) | fn extend_covered_interval(&mut self, new_start: i64, new_end: i64) {
    method combine_intervals (line 110) | fn combine_intervals(&mut self, new_intervals: Vec<(i64, i64)>) {
    method combine (line 167) | pub fn combine(&mut self, mut other: HeartbeatTransState) {
    method get_buffer (line 184) | pub fn get_buffer(&self) -> &Vec<i64> {
    method get_liveness (line 187) | pub fn get_liveness(&self) -> &Vec<(i64, i64)> {
    method from (line 581) | fn from(agg: HeartbeatAgg<'static>) -> Self {
  function trim_to (line 209) | fn trim_to(self, start: Option<i64>, end: Option<i64>) -> HeartbeatAgg<'...
  function sum_live_intervals (line 271) | fn sum_live_intervals(self) -> i64 {
  function interpolate_start (line 281) | fn interpolate_start(&mut self, pred: &Self) {
  function live_ranges (line 325) | pub fn live_ranges(
  function arrow_heartbeat_agg_live_ranges (line 340) | pub fn arrow_heartbeat_agg_live_ranges(
  function dead_ranges (line 348) | pub fn dead_ranges(
  function arrow_heartbeat_agg_dead_ranges (line 386) | pub fn arrow_heartbeat_agg_dead_ranges(
  function uptime (line 394) | pub fn uptime(agg: HeartbeatAgg<'static>) -> Interval {
  function arrow_heartbeat_agg_uptime (line 400) | pub fn arrow_heartbeat_agg_uptime(
  function interpolated_uptime (line 408) | pub fn interpolated_uptime(
  function arrow_heartbeat_agg_interpolated_uptime (line 417) | pub fn arrow_heartbeat_agg_interpolated_uptime(
  function downtime (line 425) | pub fn downtime(agg: HeartbeatAgg<'static>) -> Interval {
  function arrow_heartbeat_agg_downtime (line 431) | pub fn arrow_heartbeat_agg_downtime(
  function interpolated_downtime (line 439) | pub fn interpolated_downtime(
  function arrow_heartbeat_agg_interpolated_downtime (line 448) | pub fn arrow_heartbeat_agg_interpolated_downtime(
  function live_at (line 456) | pub fn live_at(agg: HeartbeatAgg<'static>, test: TimestampTz) -> bool {
  function arrow_heartbeat_agg_live_at (line 485) | pub fn arrow_heartbeat_agg_live_at(
  function interpolate_heartbeat_agg (line 494) | fn interpolate_heartbeat_agg(
  function arrow_heartbeat_agg_interpolate (line 507) | pub fn arrow_heartbeat_agg_interpolate(
  function num_live_ranges (line 515) | pub fn num_live_ranges(agg: HeartbeatAgg<'static>) -> i64 {
  function arrow_heartbeat_agg_num_live_ranges (line 521) | pub fn arrow_heartbeat_agg_num_live_ranges(
  function num_gaps (line 529) | pub fn num_gaps(agg: HeartbeatAgg<'static>) -> i64 {
  function arrow_heartbeat_agg_num_gaps (line 545) | pub fn arrow_heartbeat_agg_num_gaps(agg: HeartbeatAgg<'static>, _accesso...
  function trim_to (line 550) | pub fn trim_to(
  function arrow_heartbeat_agg_trim_to (line 568) | pub fn arrow_heartbeat_agg_trim_to(
  function heartbeat_trans (line 598) | pub fn heartbeat_trans(
  function heartbeat_trans_inner (line 616) | pub fn heartbeat_trans_inner(
  function heartbeat_final (line 639) | pub fn heartbeat_final(
  function heartbeat_final_inner (line 645) | pub fn heartbeat_final_inner(
  function heartbeat_rollup_trans (line 678) | pub fn heartbeat_rollup_trans(
  function heartbeat_rollup_trans_inner (line 685) | pub fn heartbeat_rollup_trans_inner(
  function test_heartbeat_trans_state (line 739) | pub fn test_heartbeat_trans_state() {
  function test_heartbeat_agg (line 794) | pub fn test_heartbeat_agg() {
  function test_heartbeat_rollup (line 1076) | pub fn test_heartbeat_rollup() {
  function test_heartbeat_combining_rollup (line 1126) | pub fn test_heartbeat_combining_rollup() {
  function test_heartbeat_trim_to (line 1226) | pub fn test_heartbeat_trim_to() {
  function test_heartbeat_agg_interpolation (line 1303) | pub fn test_heartbeat_agg_interpolation() {
  function test_heartbeat_agg_text_io (line 1690) | fn test_heartbeat_agg_text_io() {
  function test_heartbeat_agg_byte_io (line 1741) | fn test_heartbeat_agg_byte_io() {
  function test_rollup_overlap (line 1810) | fn test_rollup_overlap() {

FILE: extension/src/heartbeat_agg/accessors.rs
  function empty_agg (line 9) | fn empty_agg<'a>() -> HeartbeatAgg<'a> {
  function heartbeat_agg_interpolated_uptime_accessor (line 33) | fn heartbeat_agg_interpolated_uptime_accessor<'a>(
  function pred (line 48) | pub fn pred(&self) -> Option<HeartbeatAgg<'a>> {
  function heartbeat_agg_interpolated_downtime_accessor (line 67) | fn heartbeat_agg_interpolated_downtime_accessor<'a>(
  function pred (line 82) | pub fn pred(&self) -> Option<HeartbeatAgg<'a>> {
  function heartbeat_agg_interpolate_accessor (line 101) | fn heartbeat_agg_interpolate_accessor<'a>(
  function pred (line 116) | pub fn pred(&self) -> Option<HeartbeatAgg<'a>> {
  function heartbeat_agg_trim_to_accessor (line 138) | fn heartbeat_agg_trim_to_accessor(

FILE: extension/src/hyperloglog.rs
  type HashableDatum (line 27) | struct HashableDatum(Datum);
  method hash (line 31) | fn hash<H: Hasher>(&self, state: &mut H) {
  type HyperLogLogTrans (line 37) | pub struct HyperLogLogTrans {
  function hyperloglog_trans (line 44) | pub fn hyperloglog_trans(
  constant APPROX_COUNT_DISTINCT_DEFAULT_SIZE (line 58) | const APPROX_COUNT_DISTINCT_DEFAULT_SIZE: i32 = 32768;
  function approx_count_distinct_trans (line 62) | pub fn approx_count_distinct_trans(
  function hyperloglog_trans_inner (line 79) | pub fn hyperloglog_trans_inner(
  function hyperloglog_combine (line 126) | pub fn hyperloglog_combine(
  function hyperloglog_combine_inner (line 133) | pub fn hyperloglog_combine_inner(
  function hyperloglog_serialize (line 155) | pub fn hyperloglog_serialize(state: Internal) -> bytea {
  function hyperloglog_deserialize (line 163) | pub fn hyperloglog_deserialize(bytes: bytea, _internal: Internal) -> Opt...
  function hyperloglog_deserialize_inner (line 166) | pub fn hyperloglog_deserialize_inner(bytes: bytea) -> Inner<HyperLogLogT...
  function hyperloglog_final (line 209) | fn hyperloglog_final(
  function hyperloglog_final_inner (line 215) | fn hyperloglog_final_inner(
  function hyperloglog_union (line 278) | pub fn hyperloglog_union<'a>(
  function hyperloglog_union_inner (line 285) | pub fn hyperloglog_union_inner(
  function arrow_hyperloglog_count (line 343) | pub fn arrow_hyperloglog_count<'a>(
  function hyperloglog_count (line 351) | pub fn hyperloglog_count<'a>(hyperloglog: HyperLogLog<'a>) -> i64 {
  function arrow_hyperloglog_error (line 376) | pub fn arrow_hyperloglog_error<'a>(sketch: HyperLogLog<'a>, _accessor: A...
  function hyperloglog_error (line 381) | pub fn hyperloglog_error<'a>(hyperloglog: HyperLogLog<'a>) -> f64 {
  function build_from (line 390) | pub fn build_from(
  function flatten_log (line 414) | fn flatten_log(hyperloglog: &mut HLL<HashableDatum, DatumHashBuilder>) -...
  function unflatten_log (line 451) | fn unflatten_log(hyperloglog: HyperLogLog) -> HLL<HashableDatum, DatumHa...
  function test_hll_aggregate (line 488) | fn test_hll_aggregate() {
  function test_approx_count_distinct_aggregate (line 547) | fn test_approx_count_distinct_aggregate() {
  function test_hll_byte_io (line 623) | fn test_hll_byte_io() {
  function test_hll_aggregate_int (line 703) | fn test_hll_aggregate_int() {
  function test_hll_aggregate_text (line 757) | fn test_hll_aggregate_text() {
  function test_hll_union_text (line 819) | fn test_hll_union_text() {
  function test_hll_null_input_yields_null_output (line 881) | fn test_hll_null_input_yields_null_output() {
  function test_hll_error_too_small (line 896) | fn test_hll_error_too_small() {
  function test_hll_size_min (line 909) | fn test_hll_size_min() {
  function test_hll_size_max (line 922) | fn test_hll_size_max() {
  function stderror_arrow_match (line 935) | fn stderror_arrow_match() {
  function bias_correct_values_accurate (line 958) | fn bias_correct_values_accurate() {
  function test_hll_error_too_large (line 994) | fn test_hll_error_too_large() {
  function test_hll_null_rollup (line 1007) | fn test_hll_null_rollup() {

FILE: extension/src/lib.rs
  function _PG_init (line 62) | pub extern "C-unwind" fn _PG_init() {
  function setup (line 74) | pub fn setup(_options: Vec<&str>) {
  function postgresql_conf_options (line 78) | pub fn postgresql_conf_options() -> Vec<&'static str> {

FILE: extension/src/lttb.rs
  type LttbTrans (line 15) | pub struct LttbTrans {
  function lttb_trans (line 22) | pub fn lttb_trans(
  function lttb_trans_inner (line 31) | pub fn lttb_trans_inner(
  function lttb_final (line 69) | pub fn lttb_final(
  function lttb_final_inner (line 75) | pub fn lttb_final_inner(
  function gp_lttb_trans (line 100) | pub fn gp_lttb_trans(
  function gp_lttb_final (line 130) | pub fn gp_lttb_final(
  function gap_preserving_lttb_final_inner (line 136) | pub fn gap_preserving_lttb_final_inner(
  function lttb (line 259) | pub fn lttb(data: &[TSPoint], threshold: usize) -> Cow<'_, [TSPoint]> {
  function lttb_on_timevector (line 334) | pub fn lttb_on_timevector(
  function lttb_ts (line 342) | pub fn lttb_ts(data: Timevector_TSTZ_F64, threshold: usize) -> Timevecto...
  function test_lttb_equivalence (line 438) | fn test_lttb_equivalence() {
  function test_lttb_result (line 501) | fn test_lttb_result() {
  function test_gp_lttb (line 550) | fn test_gp_lttb() {
  function test_gp_lttb_with_gap (line 607) | fn test_gp_lttb_with_gap() {

FILE: extension/src/nmost.rs
  type NMostTransState (line 28) | pub struct NMostTransState<T: Ord> {
  function new (line 34) | fn new(capacity: usize, first_val: T) -> NMostTransState<T> {
  function new_entry (line 45) | fn new_entry(&mut self, new_val: T) {
  function belongs_in_heap (line 58) | fn belongs_in_heap(&self, val: &T) -> bool {
  function from (line 65) | fn from(input: (&[T], usize)) -> Self {
  function nmost_trans_function (line 75) | fn nmost_trans_function<T: Ord>(
  function nmost_rollup_trans_function (line 94) | fn nmost_rollup_trans_function<T: Ord + Copy>(
  function nmost_trans_combine (line 119) | fn nmost_trans_combine<T: Clone + Ord + Copy>(
  type NMostByTransState (line 144) | pub struct NMostByTransState<T: Ord> {
  function new (line 151) | fn new(capacity: usize, first_val: T, first_element: pgrx::AnyElement) -...
  function new_entry (line 161) | fn new_entry(&mut self, new_val: T, new_element: pgrx::AnyElement) {
  function into_sorted_parts (line 189) | fn into_sorted_parts(self) -> (usize, Vec<T>, DatumStore<'static>) {
  function from (line 208) | fn from(in_tuple: (&[T], &DatumStore, usize)) -> Self {
  function nmost_by_trans_function (line 219) | fn nmost_by_trans_function<T: Ord + Clone>(
  function nmost_by_rollup_trans_function (line 239) | fn nmost_by_rollup_trans_function<T: Ord + Copy>(

FILE: extension/src/nmost/max_by_float.rs
  type MaxByFloatTransType (line 15) | type MaxByFloatTransType = NMostByTransState<Reverse<NotNan<f64>>>;
  function from (line 27) | fn from(item: MaxByFloatTransType) -> Self {
  function max_n_by_float_trans (line 48) | pub fn max_n_by_float_trans(
  function max_n_by_float_rollup_trans (line 66) | pub fn max_n_by_float_rollup_trans(
  function max_n_by_float_final (line 89) | pub fn max_n_by_float_final(state: Internal) -> MaxByFloats<'static> {
  function max_n_by_float_to_values (line 94) | pub fn max_n_by_float_to_values(
  function max_by_float_correctness (line 142) | fn max_by_float_correctness() {

FILE: extension/src/nmost/max_by_int.rs
  type MaxByIntTransType (line 14) | type MaxByIntTransType = NMostByTransState<Reverse<i64>>;
  function from (line 26) | fn from(item: MaxByIntTransType) -> Self {
  function max_n_by_int_trans (line 47) | pub fn max_n_by_int_trans(
  function max_n_by_int_rollup_trans (line 65) | pub fn max_n_by_int_rollup_trans(
  function max_n_by_int_final (line 88) | pub fn max_n_by_int_final(state: Internal) -> MaxByInts<'static> {
  function max_n_by_int_to_values (line 93) | pub fn max_n_by_int_to_values(
  function max_by_int_correctness (line 141) | fn max_by_int_correctness() {

FILE: extension/src/nmost/max_by_time.rs
  type MaxByTimeTransType (line 14) | type MaxByTimeTransType = NMostByTransState<Reverse<pg_sys::TimestampTz>>;
  function from (line 26) | fn from(item: MaxByTimeTransType) -> Self {
  function max_n_by_time_trans (line 47) | pub fn max_n_by_time_trans(
  function max_n_by_time_rollup_trans (line 65) | pub fn max_n_by_time_rollup_trans(
  function max_n_by_time_final (line 88) | pub fn max_n_by_time_final(state: Internal) -> MaxByTimes<'static> {
  function max_n_by_time_to_values (line 93) | pub fn max_n_by_time_to_values(
  function max_by_time_correctness (line 148) | fn max_by_time_correctness() {

FILE: extension/src/nmost/max_float.rs
  type MaxFloatTransType (line 17) | type MaxFloatTransType = NMostTransState<Reverse<NotNan<f64>>>;
  function from (line 30) | fn from(item: &mut MaxFloatTransType) -> Self {
  function max_n_float_trans (line 48) | pub fn max_n_float_trans(
  function max_n_float_rollup_trans (line 64) | pub fn max_n_float_rollup_trans(
  function max_n_float_combine (line 85) | pub fn max_n_float_combine(
  function max_n_float_serialize (line 99) | pub fn max_n_float_serialize(state: Internal) -> bytea {
  function max_n_float_deserialize (line 105) | pub fn max_n_float_deserialize(bytes: bytea, _internal: Internal) -> Opt...
  function max_n_float_final (line 111) | pub fn max_n_float_final(state: Internal) -> MaxFloats<'static> {
  function max_n_float_to_array (line 116) | pub fn max_n_float_to_array(agg: MaxFloats<'static>) -> Vec<f64> {
  function max_n_float_to_values (line 121) | pub fn max_n_float_to_values(agg: MaxFloats<'static>) -> SetOfIterator<'...
  function arrow_max_float_into_values (line 127) | pub fn arrow_max_float_into_values(
  function arrow_max_float_into_array (line 135) | pub fn arrow_max_float_into_array(
  function max_float_correctness (line 197) | fn max_float_correctness() {

FILE: extension/src/nmost/max_int.rs
  type MaxIntTransType (line 16) | type MaxIntTransType = NMostTransState<Reverse<i64>>;
  function from (line 29) | fn from(item: &mut MaxIntTransType) -> Self {
  function max_n_int_trans (line 47) | pub fn max_n_int_trans(
  function max_n_int_rollup_trans (line 63) | pub fn max_n_int_rollup_trans(
  function max_n_int_combine (line 79) | pub fn max_n_int_combine(
  function max_n_int_serialize (line 93) | pub fn max_n_int_serialize(state: Internal) -> bytea {
  function max_n_int_deserialize (line 99) | pub fn max_n_int_deserialize(bytes: bytea, _internal: Internal) -> Optio...
  function max_n_int_final (line 105) | pub fn max_n_int_final(state: Internal) -> MaxInts<'static> {
  function max_n_int_to_array (line 110) | pub fn max_n_int_to_array(agg: MaxInts<'static>) -> Vec<i64> {
  function max_n_int_to_values (line 115) | pub fn max_n_int_to_values(agg: MaxInts<'static>) -> SetOfIterator<'stat...
  function arrow_max_int_into_values (line 121) | pub fn arrow_max_int_into_values(
  function arrow_max_int_into_array (line 129) | pub fn arrow_max_int_into_array(agg: MaxInts<'static>, _accessor: Access...
  function max_int_correctness (line 188) | fn max_int_correctness() {

FILE: extension/src/nmost/max_time.rs
  type MaxTimeTransType (line 16) | type MaxTimeTransType = NMostTransState<Reverse<pg_sys::TimestampTz>>;
  function from (line 29) | fn from(item: &mut MaxTimeTransType) -> Self {
  function max_n_time_trans (line 47) | pub fn max_n_time_trans(
  function max_n_time_rollup_trans (line 63) | pub fn max_n_time_rollup_trans(
  function max_n_time_combine (line 80) | pub fn max_n_time_combine(
  function max_n_time_serialize (line 94) | pub fn max_n_time_serialize(state: Internal) -> bytea {
  function max_n_time_deserialize (line 100) | pub fn max_n_time_deserialize(bytes: bytea, _internal: Internal) -> Opti...
  function max_n_time_final (line 106) | pub fn max_n_time_final(state: Internal) -> MaxTimes<'static> {
  function max_n_time_to_array (line 111) | pub fn max_n_time_to_array(agg: MaxTimes<'static>) -> Vec<crate::raw::Ti...
  function max_n_time_to_values (line 120) | pub fn max_n_time_to_values(
  function arrow_max_time_into_values (line 133) | pub fn arrow_max_time_into_values(
  function arrow_max_time_into_array (line 141) | pub fn arrow_max_time_into_array(
  function max_time_correctness (line 203) | fn max_time_correctness() {

FILE: extension/src/nmost/min_by_float.rs
  type MinByFloatTransType (line 14) | type MinByFloatTransType = NMostByTransState<NotNan<f64>>;
  function from (line 26) | fn from(item: MinByFloatTransType) -> Self {
  function min_n_by_float_trans (line 47) | pub fn min_n_by_float_trans(
  function min_n_by_float_rollup_trans (line 65) | pub fn min_n_by_float_rollup_trans(
  function min_n_by_float_final (line 88) | pub fn min_n_by_float_final(state: Internal) -> MinByFloats<'static> {
  function min_n_by_float_to_values (line 93) | pub fn min_n_by_float_to_values(
  function min_by_float_correctness (line 141) | fn min_by_float_correctness() {

FILE: extension/src/nmost/min_by_int.rs
  type MinByIntTransType (line 12) | type MinByIntTransType = NMostByTransState<i64>;
  function from (line 24) | fn from(item: MinByIntTransType) -> Self {
  function min_n_by_int_trans (line 41) | pub fn min_n_by_int_trans(
  function min_n_by_int_rollup_trans (line 59) | pub fn min_n_by_int_rollup_trans(
  function min_n_by_int_final (line 75) | pub fn min_n_by_int_final(state: Internal) -> MinByInts<'static> {
  function min_n_by_int_to_values (line 80) | pub fn min_n_by_int_to_values(
  function min_by_int_correctness (line 128) | fn min_by_int_correctness() {

FILE: extension/src/nmost/min_by_time.rs
  type MinByTimeTransType (line 12) | type MinByTimeTransType = NMostByTransState<pg_sys::TimestampTz>;
  function from (line 24) | fn from(item: MinByTimeTransType) -> Self {
  function min_n_by_time_trans (line 41) | pub fn min_n_by_time_trans(
  function min_n_by_time_rollup_trans (line 59) | pub fn min_n_by_time_rollup_trans(
  function min_n_by_time_final (line 75) | pub fn min_n_by_time_final(state: Internal) -> MinByTimes<'static> {
  function min_n_by_time_to_values (line 80) | pub fn min_n_by_time_to_values(
  function min_by_time_correctness (line 135) | fn min_by_time_correctness() {

FILE: extension/src/nmost/min_float.rs
  type MinFloatTransType (line 16) | type MinFloatTransType = NMostTransState<NotNan<f64>>;
  function from (line 29) | fn from(item: &mut MinFloatTransType) -> Self {
  function min_n_float_trans (line 47) | pub fn min_n_float_trans(
  function min_n_float_rollup_trans (line 63) | pub fn min_n_float_rollup_trans(
  function min_n_float_combine (line 84) | pub fn min_n_float_combine(
  function min_n_float_serialize (line 98) | pub fn min_n_float_serialize(state: Internal) -> bytea {
  function min_n_float_deserialize (line 104) | pub fn min_n_float_deserialize(bytes: bytea, _internal: Internal) -> Opt...
  function min_n_float_final (line 110) | pub fn min_n_float_final(state: Internal) -> MinFloats<'static> {
  function min_n_float_to_array (line 115) | pub fn min_n_float_to_array(agg: MinFloats<'static>) -> Vec<f64> {
  function min_n_float_to_values (line 120) | pub fn min_n_float_to_values(agg: MinFloats<'static>) -> SetOfIterator<'...
  function arrow_min_float_into_values (line 126) | pub fn arrow_min_float_into_values(
  function arrow_min_float_into_array (line 134) | pub fn arrow_min_float_into_array(
  function min_float_correctness (line 196) | fn min_float_correctness() {

FILE: extension/src/nmost/min_int.rs
  type MinIntTransType (line 14) | type MinIntTransType = NMostTransState<i64>;
  function from (line 27) | fn from(item: &mut MinIntTransType) -> Self {
  function min_n_int_trans (line 40) | pub fn min_n_int_trans(
  function min_n_int_rollup_trans (line 56) | pub fn min_n_int_rollup_trans(
  function min_n_int_combine (line 71) | pub fn min_n_int_combine(
  function min_n_int_serialize (line 85) | pub fn min_n_int_serialize(state: Internal) -> bytea {
  function min_n_int_deserialize (line 91) | pub fn min_n_int_deserialize(bytes: bytea, _internal: Internal) -> Optio...
  function min_n_int_final (line 97) | pub fn min_n_int_final(state: Internal) -> MinInts<'static> {
  function min_n_int_to_array (line 102) | pub fn min_n_int_to_array(agg: MinInts<'static>) -> Vec<i64> {
  function min_n_int_to_values (line 107) | pub fn min_n_int_to_values(agg: MinInts<'static>) -> SetOfIterator<'stat...
  function arrow_min_int_into_values (line 113) | pub fn arrow_min_int_into_values(
  function arrow_min_int_into_array (line 121) | pub fn arrow_min_int_into_array(agg: MinInts<'static>, _accessor: Access...
  function min_int_correctness (line 180) | fn min_int_correctness() {

FILE: extension/src/nmost/min_time.rs
  type MinTimeTransType (line 14) | type MinTimeTransType = NMostTransState<pg_sys::TimestampTz>;
  function from (line 27) | fn from(item: &mut MinTimeTransType) -> Self {
  function min_n_time_trans (line 40) | pub fn min_n_time_trans(
  function min_n_time_rollup_trans (line 56) | pub fn min_n_time_rollup_trans(
  function min_n_time_combine (line 71) | pub fn min_n_time_combine(
  function min_n_time_serialize (line 85) | pub fn min_n_time_serialize(state: Internal) -> bytea {
  function min_n_time_deserialize (line 91) | pub fn min_n_time_deserialize(bytes: bytea, _internal: Internal) -> Opti...
  function min_n_time_final (line 97) | pub fn min_n_time_final(state: Internal) -> MinTimes<'static> {
  function min_n_time_to_array (line 102) | pub fn min_n_time_to_array(agg: MinTimes<'static>) -> Vec<crate::raw::Ti...
  function min_n_time_to_values (line 111) | pub fn min_n_time_to_values(
  function arrow_min_time_into_values (line 124) | pub fn arrow_min_time_into_values(
  function arrow_min_time_into_array (line 132) | pub fn arrow_min_time_into_array(
  function min_time_correctness (line 194) | fn min_time_correctness() {

FILE: extension/src/palloc.rs
  function in_memory_context (line 9) | pub unsafe fn in_memory_context<T, F: FnOnce() -> T>(mctx: pg_sys::Memor...
  type InternalAsValue (line 27) | pub unsafe trait InternalAsValue {
    method to_inner (line 29) | unsafe fn to_inner<T>(self) -> Option<Inner<T>>;
    method to_inner (line 42) | unsafe fn to_inner<T>(self) -> Option<Inner<T>> {
  type ToInternal (line 53) | pub unsafe trait ToInternal {
    method internal (line 54) | fn internal(self) -> Option<Internal>;
    method internal (line 74) | fn internal(self) -> Option<Internal> {
    method internal (line 80) | fn internal(self) -> Option<Internal> {
    method internal (line 93) | fn internal(self) -> Option<Internal> {
    method internal (line 99) | fn internal(self) -> Option<Internal> {
  type Inner (line 57) | pub struct Inner<T>(pub NonNull<T>);
  type Target (line 60) | type Target = T;
  method deref (line 62) | fn deref(&self) -> &Self::Target {
  method deref_mut (line 68) | fn deref_mut(&mut self) -> &mut Self::Target {
  function from (line 86) | fn from(t: T) -> Self {
  type PanickingAllocator (line 127) | struct PanickingAllocator;
  method alloc (line 133) | unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
  method dealloc (line 141) | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
  method alloc_zeroed (line 145) | unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
  method realloc (line 153) | unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) ...

FILE: extension/src/pg_any_element.rs
  type PgAnyElement (line 14) | pub struct PgAnyElement {
    method deep_copy_datum (line 27) | pub fn deep_copy_datum(&self) -> Datum {
    method from (line 83) | fn from(other: (Datum, Oid)) -> Self {
    method from (line 90) | fn from(other: AnyElement) -> Self {
  method eq (line 34) | fn eq(&self, other: &Self) -> bool {
  method hash (line 77) | fn hash<H: Hasher>(&self, state: &mut H) {
  type PgAnyElementHashMap (line 98) | pub struct PgAnyElementHashMap<V>(pub(crate) HashMap<PgAnyElement, V, Da...
  function new (line 101) | pub fn new(typoid: Oid, collation: Option<Oid>) -> Self {
  function with_hasher (line 107) | pub(crate) fn with_hasher(hasher: DatumHashBuilder) -> Self {
  function typoid (line 111) | pub fn typoid(&self) -> Oid {
  function contains_key (line 116) | pub fn contains_key(&self, k: &PgAnyElement) -> bool {
  function get (line 119) | pub fn get(&self, k: &PgAnyElement) -> Option<&V> {
  function get_mut (line 122) | pub fn get_mut(&mut self, k: &PgAnyElement) -> Option<&mut V> {
  function hasher (line 125) | pub(crate) fn hasher(&self) -> &DatumHashBuilder {
  function insert (line 128) | pub fn insert(&mut self, k: PgAnyElement, v: V) -> Option<V> {
  function len (line 131) | pub fn len(&self) -> usize {
  function remove (line 134) | pub fn remove(&mut self, k: &PgAnyElement) -> Option<V> {

FILE: extension/src/range.rs
  type tstzrange (line 10) | pub type tstzrange = *mut pg_sys::varlena;
  function get_range (line 15) | pub unsafe fn get_range(range: tstzrange) -> Option<I64Range> {
  function get_toasted_bytes (line 46) | unsafe fn get_toasted_bytes(ptr: &pg_sys::varlena) -> &[u8] {
  constant RANGE_EMPTY (line 55) | const RANGE_EMPTY: u8 = 0x01;
  constant RANGE_LB_INC (line 56) | const RANGE_LB_INC: u8 = 0x02;
  constant RANGE_UB_INC (line 57) | const RANGE_UB_INC: u8 = 0x04;
  constant RANGE_LB_INF (line 58) | const RANGE_LB_INF: u8 = 0x08;
  constant RANGE_UB_INF (line 59) | const RANGE_UB_INF: u8 = 0x10;
  constant RANGE_LB_NULL (line 60) | const RANGE_LB_NULL: u8 = 0x20;
  constant RANGE_UB_NULL (line 61) | const RANGE_UB_NULL: u8 = 0x40;
  function range_has_lbound (line 63) | fn range_has_lbound(flags: u8) -> bool {
  function lbound_inclusive (line 67) | fn lbound_inclusive(flags: u8) -> bool {
  function range_has_rbound (line 71) | fn range_has_rbound(flags: u8) -> bool {
  function rbound_inclusive (line 74) | fn rbound_inclusive(flags: u8) -> bool {
  method to_i64range (line 106) | pub fn to_i64range(&self) -> Option<I64Range> {
  method from_i64range (line 116) | pub fn from_i64range(b: Option<I64Range>) -> Self {

FILE: extension/src/raw.rs
  type bytea (line 93) | pub struct bytea(pub pg_sys::Datum);
    method box_into (line 98) | unsafe fn box_into<'fcx>(
  type text (line 107) | pub struct text(pub pg_sys::Datum);
  type TimestampTz (line 111) | pub struct TimestampTz(pub pg_sys::Datum);
    method box_into (line 120) | unsafe fn box_into<'fcx>(
    method from (line 135) | fn from(ts: pg_sys::TimestampTz) -> Self {
  function from (line 129) | fn from(tstz: TimestampTz) -> Self {
  type AnyElement (line 140) | pub struct AnyElement(pub pg_sys::Datum);
  type tstzrange (line 144) | pub struct tstzrange(pub pg_sys::Datum);
  type Interval (line 148) | pub struct Interval(pub pg_sys::Datum);
    method box_into (line 153) | unsafe fn box_into<'fcx>(
    method from (line 162) | fn from(interval: i64) -> Self {
  type regproc (line 191) | pub struct regproc(pub pg_sys::Datum);

FILE: extension/src/saturation.rs
  function saturating_add (line 7) | fn saturating_add(x: i32, y: i32) -> i32 {
  function saturating_add_pos (line 13) | fn saturating_add_pos(x: i32, y: i32) -> i32 {
  function saturating_sub (line 25) | fn saturating_sub(x: i32, y: i32) -> i32 {
  function saturating_sub_pos (line 31) | fn saturating_sub_pos(x: i32, y: i32) -> i32 {
  function saturating_mul (line 41) | fn saturating_mul(x: i32, y: i32) -> i32 {
  function test_saturating_add_max (line 53) | fn test_saturating_add_max() {
  function test_saturating_add_min (line 59) | fn test_saturating_add_min() {
  function test_saturating_add_pos (line 65) | fn test_saturating_add_pos() {
  function test_saturating_sub_max (line 71) | fn test_saturating_sub_max() {
  function test_saturating_sub_min (line 77) | fn test_saturating_sub_min() {
  function test_saturating_sub_pos (line 83) | fn test_saturating_sub_pos() {
  function test_saturating_mul_max (line 89) | fn test_saturating_mul_max() {
  function test_saturating_mul_min (line 95) | fn test_saturating_mul_min() {

FILE: extension/src/serialization.rs
  function _ts_toolkit_encode_timestamptz (line 19) | pub extern "C" fn _ts_toolkit_encode_timestamptz(
  function _ts_toolkit_decode_timestamptz (line 57) | pub extern "C" fn _ts_toolkit_decode_timestamptz(text: &str) -> i64 {
  type EncodedStr (line 143) | pub enum EncodedStr<'s> {
  function str_to_db_encoding (line 148) | pub fn str_to_db_encoding(s: &str) -> EncodedStr<'_> {
  function str_from_db_encoding (line 169) | pub fn str_from_db_encoding(s: &CStr) -> &str {
  function default_padding (line 185) | pub(crate) fn default_padding() -> [u8; 3] {
  function default_header (line 189) | pub(crate) fn default_header() -> u32 {

FILE: extension/src/serialization/collations.rs
  type PgCollationId (line 23) | pub struct PgCollationId(pub Oid);
    method is_invalid (line 28) | pub fn is_invalid(&self) -> bool {
    method to_option_oid (line 32) | pub fn to_option_oid(self) -> Option<Oid> {
    method deserialize (line 167) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  constant Anum_pg_collation_oid (line 42) | const Anum_pg_collation_oid: u32 = 1;
  constant DEFAULT_COLLATION_OID (line 45) | pub(crate) const DEFAULT_COLLATION_OID: Oid = unsafe { Oid::from_u32_unc...
  type FormData_pg_collation (line 50) | struct FormData_pg_collation {
  type Form_pg_collation (line 63) | type Form_pg_collation = *mut FormData_pg_collation;
  type FormData_pg_database (line 68) | struct FormData_pg_database {
  type Form_pg_database (line 78) | type Form_pg_database = *mut FormData_pg_database;
  method serialize (line 103) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function get_struct (line 248) | unsafe fn get_struct<T>(tuple: pg_sys::HeapTuple) -> *mut T {
  constant COLLATION_ID_950 (line 263) | const COLLATION_ID_950: PgCollationId =
  constant COLLATION_ID_951 (line 266) | const COLLATION_ID_951: PgCollationId =
  function test_pg_collation_id_serialize_default_collation_ron (line 271) | fn test_pg_collation_id_serialize_default_collation_ron() {
  function test_pg_collation_id_serialize_c_collation (line 287) | fn test_pg_collation_id_serialize_c_collation() {
  function test_pg_collation_id_serialize_c_collation_ron (line 302) | fn test_pg_collation_id_serialize_c_collation_ron() {
  function test_pg_collation_id_serialize_posix_collation (line 310) | fn test_pg_collation_id_serialize_posix_collation() {
  function test_pg_collation_id_serialize_posix_collation_ron (line 325) | fn test_pg_collation_id_serialize_posix_collation_ron() {

FILE: extension/src/serialization/functions.rs
  type PgProcId (line 19) | pub struct PgProcId(pub Oid);
    method deserialize (line 47) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  function format_procedure_qualified (line 26) | pub fn format_procedure_qualified(procedure_oid: pg_sys::Oid) -> *const ...
  method serialize (line 30) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>

FILE: extension/src/serialization/types.rs
  type ShortTypeId (line 20) | pub struct ShortTypeId(pub Oid);
    method from (line 25) | fn from(id: Oid) -> Self {
    method deserialize (line 46) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  method from (line 31) | fn from(id: ShortTypeId) -> Self {
  method serialize (line 37) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  type ShortTypIdSerializer (line 57) | enum ShortTypIdSerializer {
    method from_oid (line 105) | pub fn from_oid(oid: Oid) -> Self {
    method to_oid (line 154) | pub fn to_oid(&self) -> Oid {
  type PgTypId (line 208) | pub struct PgTypId(pub Oid);
    method deserialize (line 259) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  method serialize (line 211) | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  function get_struct (line 310) | unsafe fn get_struct<T>(tuple: pg_sys::HeapTuple) -> *mut T {
  function test_pg_type_id_serialize_char_type (line 328) | fn test_pg_type_id_serialize_char_type() {
  function test_pg_type_id_serialize_char_type_ron (line 342) | fn test_pg_type_id_serialize_char_type_ron() {
  function test_pg_type_id_serialize_bool_type (line 350) | fn test_pg_type_id_serialize_bool_type() {
  function test_pg_type_id_serialize_bool_type_ron (line 363) | fn test_pg_type_id_serialize_bool_type_ron() {
  function test_short_type_id_serialize_char_type (line 371) | fn test_short_type_id_serialize_char_type() {
  function test_short_type_id_serialize_char_type_ron (line 379) | fn test_short_type_id_serialize_char_type_ron() {
  function test_short_type_id_serialize_bool_type (line 387) | fn test_short_type_id_serialize_bool_type() {
  function test_short_type_id_serialize_bool_type_ron (line 395) | fn test_short_type_id_serialize_bool_type_ron() {
  function test_short_type_id_serialize_circle_type (line 403) | fn test_short_type_id_serialize_circle_type() {
  function test_short_type_id_serialize_circle_type_ron (line 417) | fn test_short_type_id_serialize_circle_type_ron() {

FILE: extension/src/stabilization_tests.rs
  function test_schema_qualification (line 14) | fn test_schema_qualification() {
  function stable_functions (line 153) | fn stable_functions() -> HashSet<String> {
  function stable_types (line 157) | fn stable_types() -> HashSet<String> {
  function stable_operators (line 161) | fn stable_operators() -> HashSet<String> {

FILE: extension/src/state_aggregate.rs
  type MaterializedState (line 37) | enum MaterializedState {
    method entry (line 42) | fn entry(&self, states: &mut String) -> StateEntry {
    method existing_entry (line 48) | fn existing_entry(&self, states: &str) -> StateEntry {
    method into_string (line 55) | fn into_string(self) -> String {
    method into_integer (line 61) | fn into_integer(self) -> i64 {
  type StateEntry (line 74) | pub struct StateEntry {
    method from_integer (line 80) | fn from_integer(int: i64) -> Self {
    method from_str (line 86) | fn from_str(states: &mut String, new_state: &str) -> Self {
    method from_existing_str (line 99) | fn from_existing_str(states: &str, state: &str) -> Self {
    method try_from_existing_str (line 106) | fn try_from_existing_str(states: &str, state: &str) -> Option<Self> {
    method materialize (line 116) | fn materialize(&self, states: &str) -> MaterializedState {
    method as_str (line 129) | fn as_str(self, states: &str) -> &str {
    method into_integer (line 136) | fn into_integer(self) -> i64 {
  function empty (line 165) | pub(super) fn empty(compact: bool, integer_states: bool) -> Self {
  function new (line 184) | pub(super) fn new(
  function get (line 250) | pub fn get(&self, state: StateEntry) -> Option<i64> {
  function get_materialized (line 253) | pub(super) fn get_materialized(&self, state: &MaterializedState) -> Opti...
  function states_as_str (line 262) | pub(super) fn states_as_str(&self) -> &str {
  function interpolate (line 268) | pub(super) fn interpolate(
  function assert_int (line 399) | pub fn assert_int<'a>(&self) {
  function assert_str (line 405) | pub fn assert_str<'a>(&self) {
  function new (line 423) | pub fn new(compact_state_agg: CompactStateAgg) -> Self {
  function empty (line 431) | pub fn empty(integer_states: bool) -> Self {
  function as_compact_state_agg (line 435) | pub fn as_compact_state_agg(self) -> toolkit_experimental::CompactStateA...
  function assert_int (line 439) | pub fn assert_int<'a>(&self) {
  function assert_str (line 445) | pub fn assert_str<'a>(&self) {
  function state_trans_inner (line 454) | fn state_trans_inner(
  type State (line 470) | type State = CompactStateAggTransState;
  constant PARALLEL_SAFE (line 472) | const PARALLEL_SAFE: bool = true;
  function transition (line 474) | fn transition(
  function combine (line 482) | fn combine(a: Option<&State>, b: Option<&State>) -> Option<State> {
  function serialize (line 494) | fn serialize(state: &mut State) -> bytea {
  function deserialize (line 498) | fn deserialize(bytes: bytea) -> State {
  function finally (line 502) | fn finally(state: Option<&mut State>) -> Option<CompactStateAgg<'static>> {
  function compact_state_agg_int_trans (line 541) | fn compact_state_agg_int_trans(
  type State (line 575) | type State = CompactStateAggTransState;
  constant PARALLEL_SAFE (line 577) | const PARALLEL_SAFE: bool = true;
  method transition (line 579) | fn transition(
  method combine (line 587) | fn combine(a: Option<&State>, b: Option<&State>) -> Option<State> {
  method serialize (line 591) | fn serialize(state: &mut State) -> bytea {
  method deserialize (line 595) | fn deserialize(bytes: bytea) -> State {
  method finally (line 599) | fn finally(state: Option<&mut State>) -> Option<StateAgg<'static>> {
  function state_agg_int_trans (line 667) | fn state_agg_int_trans(
  type CompactStateAggTransState (line 701) | pub struct CompactStateAggTransState {
    method new (line 707) | fn new(integer_states: bool) -> Self {
    method record (line 714) | fn record(&mut self, state: MaterializedState, time: i64) {
    method append (line 718) | fn append(&mut self, other: &mut Self) {
    method sort_records (line 722) | fn sort_records(&mut self) {
    method make_duration_map_and_bounds (line 742) | fn make_duration_map_and_bounds(
  function duration_in_inner (line 763) | fn duration_in_inner<'a>(
  function duration_in (line 809) | pub fn duration_in<'a>(agg: Option<CompactStateAgg<'a>>, state: String) ...
  function duration_in_int (line 823) | pub fn duration_in_int<'a>(agg: Option<CompactStateAgg<'a>>, state: i64)...
  function duration_in_tl (line 831) | pub fn duration_in_tl<'a>(agg: Option<StateAgg<'a>>, state: String) -> c...
  function duration_in_tl_int (line 839) | pub fn duration_in_tl_int<'a>(agg: Option<StateAgg<'a>>, state: i64) -> ...
  function arrow_state_agg_duration_in_string (line 851) | pub fn arrow_state_agg_duration_in_string<'a>(
  function arrow_state_agg_duration_in_int (line 862) | pub fn arrow_state_agg_duration_in_int<'a>(
  function duration_in_range (line 871) | pub fn duration_in_range<'a>(
  function duration_in_range_int (line 891) | pub fn duration_in_range_int<'a>(
  constant NO_INTERVAL_MARKER (line 911) | const NO_INTERVAL_MARKER: i64 = i64::MIN;
  function range_tuple (line 912) | fn range_tuple(start: i64, interval: i64) -> (i64, Option<i64>) {
  function arrow_state_agg_duration_in_range_string (line 924) | pub fn arrow_state_agg_duration_in_range_string<'a>(
  function arrow_state_agg_duration_in_range_int (line 939) | pub fn arrow_state_agg_duration_in_range_int<'a>(
  function interpolated_duration_in_inner (line 951) | fn interpolated_duration_in_inner<'a>(
  function interpolated_duration_in (line 988) | pub fn interpolated_duration_in<'a>(
  function interpolated_duration_in_tl (line 1009) | pub fn interpolated_duration_in_tl<'a>(
  function interpolated_duration_in_int (line 1034) | pub fn interpolated_duration_in_int<'a>(
  function interpolated_duration_in_tl_int (line 1055) | pub fn interpolated_duration_in_tl_int<'a>(
  function arrow_state_agg_interpolated_duration_in_string (line 1075) | pub fn arrow_state_agg_interpolated_duration_in_string<'a>(
  function arrow_state_agg_interpolated_duration_in_int (line 1096) | pub fn arrow_state_agg_interpolated_duration_in_int<'a>(
  function duration_in_bad_args_inner (line 1114) | fn duration_in_bad_args_inner() -> ! {
  function duration_in_bad_args (line 1125) | pub fn duration_in_bad_args<'a>(
  function duration_in_int_bad_args (line 1140) | pub fn duration_in_int_bad_args<'a>(
  function into_values (line 1150) | pub fn into_values<'a>(
  function into_int_values (line 1169) | pub fn into_int_values<'a>(
  function into_values_tl (line 1189) | pub fn into_values_tl<'a>(
  function into_values_tl_int (line 1202) | pub fn into_values_tl_int<'a>(
  function arrow_state_agg_into_values (line 1216) | pub fn arrow_state_agg_into_values<'a>(
  function arrow_state_agg_into_int_values (line 1230) | pub fn arrow_state_agg_into_int_values<'a>(
  function state_timeline_inner (line 1243) | fn state_timeline_inner<'a>(
  function state_int_timeline_inner (line 1271) | fn state_int_timeline_inner<'a>(
  function state_timeline (line 1302) | pub fn state_timeline<'a>(
  function state_int_timeline (line 1316) | pub fn state_int_timeline<'a>(
  function arrow_state_agg_state_timeline (line 1332) | pub fn arrow_state_agg_state_timeline<'a>(
  function arrow_state_agg_state_int_timeline (line 1347) | pub fn arrow_state_agg_state_int_timeline<'a>(
  function interpolated_state_timeline_inner (line 1361) | fn interpolated_state_timeline_inner<'a>(
  function interpolated_state_int_timeline_inner (line 1392) | fn interpolated_state_int_timeline_inner<'a>(
  function interpolated_state_timeline (line 1424) | pub fn interpolated_state_timeline<'a>(
  function interpolated_state_int_timeline (line 1441) | pub fn interpolated_state_int_timeline<'a>(
  function arrow_state_agg_interpolated_state_timeline (line 1459) | pub fn arrow_state_agg_interpolated_state_timeline<'a>(
  function arrow_state_agg_interpolated_state_int_timeline (line 1483) | pub fn arrow_state_agg_interpolated_state_int_timeline<'a>(
  function state_periods_inner (line 1506) | fn state_periods_inner<'a>(
  function state_periods (line 1539) | pub fn state_periods<'a>(
  function state_int_periods (line 1554) | pub fn state_int_periods<'a>(
  function arrow_state_agg_state_periods_string (line 1573) | pub fn arrow_state_agg_state_periods_string<'a>(
  function arrow_state_agg_state_periods_int (line 1590) | pub fn arrow_state_agg_state_periods_int<'a>(
  function interpolated_state_periods_inner (line 1604) | fn interpolated_state_periods_inner<'a>(
  function interpolated_state_periods (line 1629) | pub fn interpolated_state_periods<'a>(
  function interpolated_state_periods_int (line 1655) | pub fn interpolated_state_periods_int<'a>(
  function arrow_state_agg_interpolated_state_periods_string (line 1682) | pub fn arrow_state_agg_interpolated_state_periods_string<'a>(
  function arrow_state_agg_interpolated_state_periods_int (line 1709) | pub fn arrow_state_agg_interpolated_state_periods_int<'a>(
  function state_at_inner (line 1733) | fn state_at_inner<'a>(agg: StateAgg<'a>, point: i64) -> Option<Materiali...
  function state_at (line 1752) | fn state_at<'a>(agg: StateAgg<'a>, point: TimestampTz) -> Option<String> {
  function state_at_int (line 1758) | fn state_at_int<'a>(agg: StateAgg<'a>, point: TimestampTz) -> Option<i64> {
  function arrow_state_agg_state_at_string (line 1765) | pub fn arrow_state_agg_state_at_string<'a>(
  function arrow_state_agg_state_at_int (line 1774) | pub fn arrow_state_agg_state_at_int<'a>(
  type DurationInState (line 1783) | pub struct DurationInState {
  type TimeInState (line 1790) | pub struct TimeInState {
  type DurationState (line 1796) | struct DurationState {
    method new (line 1801) | fn new() -> Self {
    method handle_record (line 1808) | fn handle_record(&mut self, state: MaterializedState, time: i64) {
    method finalize (line 1830) | fn finalize(&mut self) {
  type Record (line 1838) | struct Record {
  function duration_in_misuse_error (line 1865) | fn duration_in_misuse_error() {
  function one_state_one_change (line 1882) | fn one_state_one_change() {
  function two_states_two_changes (line 1917) | fn two_states_two_changes() {
  function two_states_three_changes (line 1954) | fn two_states_three_changes() {
  function out_of_order_times (line 2009) | fn out_of_order_times() {
  function same_state_twice (line 2047) | fn same_state_twice() {
  function duration_in_two_states_two_changes (line 2086) | fn duration_in_two_states_two_changes() {
  function same_state_twice_last (line 2122) | fn same_state_twice_last() {
  function combine_using_muchos_data (line 2150) | fn combine_using_muchos_data() {
  function combine_using_settings (line 2187) | fn combine_using_settings() {
  function sample_query (line 2233) | fn sample_query() {
  function interpolated_duration (line 2282) | fn interpolated_duration() {
  function two_states_at_one_time (line 2462) | fn two_states_at_one_time() {
  function interpolate_introduces_state (line 2494) | fn interpolate_introduces_state() {
  function text_serialization (line 2627) | fn text_serialization() {
  function combine (line 2669) | fn combine() {
  function binary_serialization_integer (line 2698) | fn binary_serialization_integer() {
  function binary_serialization_string (line 2735) | fn binary_serialization_string() {

FILE: extension/src/state_aggregate/accessors.rs
  function accessor_state_agg_interpolated_interpolated_state_timeline (line 20) | fn accessor_state_agg_interpolated_interpolated_state_timeline<'a>(
  function accessor_state_agg_interpolated_interpolated_state_int_timeline (line 46) | fn accessor_state_agg_interpolated_interpolated_state_int_timeline<'a>(
  function accessor_state_agg_interpolated_interpolated_duration_in (line 87) | fn accessor_state_agg_interpolated_interpolated_duration_in<'a>(
  function accessor_state_agg_interpolated_interpolated_duration_in_int (line 106) | fn accessor_state_agg_interpolated_interpolated_duration_in_int<'a>(
  function accessor_state_agg_interpolated_interpolated_state_periods (line 150) | fn accessor_state_agg_interpolated_interpolated_state_periods<'a>(
  function accessor_state_agg_interpolated_interpolated_state_periods_int (line 169) | fn accessor_state_agg_interpolated_interpolated_state_periods_int<'a>(
  function accessor_state_agg_duration_in (line 202) | fn accessor_state_agg_duration_in(state: String) -> AccessorDurationIn<'...
  function accessor_state_agg_duration_in_int (line 211) | fn accessor_state_agg_duration_in_int(state: i64) -> AccessorDurationInI...
  function accessor_state_agg_state_periods (line 234) | fn accessor_state_agg_state_periods<'a>(state: String) -> AccessorStateP...
  function accessor_state_agg_state_periods_int (line 243) | fn accessor_state_agg_state_periods_int(state: i64) -> AccessorStatePeri...
  function accessor_state_agg_duration_in_range (line 271) | fn accessor_state_agg_duration_in_range(
  function accessor_state_agg_duration_in_range_int (line 290) | fn accessor_state_agg_duration_in_range_int(
  function accessor_state_agg_state_at (line 315) | fn accessor_state_agg_state_at(time: TimestampTz) -> AccessorStateAt {
  function accessor_state_agg_state_at_int (line 331) | fn accessor_state_agg_state_at_int(time: TimestampTz) -> AccessorStateAt...

FILE: extension/src/state_aggregate/rollup.rs
  type RollupTransState (line 54) | pub struct RollupTransState {
    method merge (line 267) | fn merge(&mut self) {
  type OwnedCompactStateAgg (line 60) | struct OwnedCompactStateAgg {
    method merge (line 73) | pub fn merge(self, other: Self) -> Self {
    method from (line 238) | fn from(agg: CompactStateAgg<'a>) -> OwnedCompactStateAgg {
  function from (line 217) | fn from(owned: OwnedCompactStateAgg) -> CompactStateAgg<'a> {
  method partial_cmp (line 254) | fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
  method cmp (line 260) | fn cmp(&self, other: &Self) -> Ordering {
  function compact_state_agg_rollup_trans (line 280) | pub fn compact_state_agg_rollup_trans(
  function compact_state_agg_rollup_trans_inner (line 288) | pub fn compact_state_agg_rollup_trans_inner(
  function state_agg_rollup_trans (line 313) | pub fn state_agg_rollup_trans(
  function compact_state_agg_rollup_final (line 327) | fn compact_state_agg_rollup_final(
  function compact_state_agg_rollup_final_inner (line 334) | fn compact_state_agg_rollup_final_inner(
  function state_agg_rollup_final (line 353) | fn state_agg_rollup_final(
  function state_agg_rollup_final_inner (line 360) | fn state_agg_rollup_final_inner(
  function state_agg_rollup_serialize (line 379) | pub fn state_agg_rollup_serialize(state: Internal) -> bytea {
  function state_agg_rollup_deserialize (line 386) | pub fn state_agg_rollup_deserialize(bytes: bytea, _internal: Internal) -...
  function state_agg_rollup_deserialize_inner (line 389) | pub fn state_agg_rollup_deserialize_inner(bytes: bytea) -> Inner<RollupT...
  function state_agg_rollup_combine (line 395) | pub fn state_agg_rollup_combine(
  function state_agg_rollup_combine_inner (line 406) | pub fn state_agg_rollup_combine_inner(
  function merge_range_full_overlap (line 443) | fn merge_range_full_overlap() {
  function merge_range_partial_overlap (line 457) | fn merge_range_partial_overlap() {
  function merges_compact_aggs_correctly (line 470) | fn merges_compact_aggs_correctly() {

FILE: extension/src/stats_agg.rs
  type StatsSummary1DTF (line 26) | type StatsSummary1DTF = InternalStatsSummary1D<TwoFloat>;
  type StatsSummary2DTF (line 27) | type StatsSummary2DTF = InternalStatsSummary2D<TwoFloat>;
  method to_internal (line 60) | fn to_internal(&self) -> InternalStatsSummary1D<f64> {
  method from_internal (line 69) | pub fn from_internal(st: InternalStatsSummary1D<f64>) -> Self {
  method to_internal (line 81) | fn to_internal(&self) -> InternalStatsSummary2D<f64> {
  method from_internal (line 95) | fn from_internal(st: InternalStatsSummary2D<f64>) -> Self {
  function stats1d_trans_serialize (line 112) | pub fn stats1d_trans_serialize(state: Internal) -> bytea {
  function stats1d_trans_deserialize (line 119) | pub fn stats1d_trans_deserialize(bytes: bytea, _internal: Internal) -> O...
  function stats1d_trans_deserialize_inner (line 122) | pub fn stats1d_trans_deserialize_inner(bytes: bytea) -> Inner<StatsSumma...
  function stats2d_trans_serialize (line 128) | pub fn stats2d_trans_serialize(state: Internal) -> bytea {
  function stats2d_trans_deserialize (line 135) | pub fn stats2d_trans_deserialize(bytes: bytea, _internal: Internal) -> O...
  function stats2d_trans_deserialize_inner (line 138) | pub fn stats2d_trans_deserialize_inner(bytes: bytea) -> Inner<StatsSumma...
  function stats1d_trans (line 144) | pub fn stats1d_trans(
  function stats1d_tf_trans (line 152) | pub fn stats1d_tf_trans(
  function stats1d_trans_inner (line 159) | pub fn stats1d_trans_inner(
  function stats1d_tf_trans_inner (line 186) | pub fn stats1d_tf_trans_inner(
  function stats2d_trans (line 215) | pub fn stats2d_trans(
  function stats2d_trans_inner (line 223) | pub fn stats2d_trans_inner(
  function stats2d_tf_trans (line 259) | pub fn stats2d_tf_trans(
  function stats2d_tf_trans_inner (line 267) | pub fn stats2d_tf_trans_inner(
  function stats1d_inv_trans (line 306) | pub fn stats1d_inv_trans(
  function stats1d_inv_trans_inner (line 313) | pub fn stats1d_inv_trans_inner(
  function stats1d_tf_inv_trans (line 332) | pub fn stats1d_tf_inv_trans(
  function stats1d_tf_inv_trans_inner (line 339) | pub fn stats1d_tf_inv_trans_inner(
  function stats2d_inv_trans (line 358) | pub fn stats2d_inv_trans(
  function stats2d_inv_trans_inner (line 366) | pub fn stats2d_inv_trans_inner(
  function stats2d_tf_inv_trans (line 393) | pub fn stats2d_tf_inv_trans(
  function stats2d_tf_inv_trans_inner (line 401) | pub fn stats2d_tf_inv_trans_inner(
  function stats1d_summary_trans (line 431) | pub fn stats1d_summary_trans(
  function stats1d_summary_trans_inner (line 438) | pub fn stats1d_summary_trans_inner(
  function stats2d_summary_trans (line 459) | pub fn stats2d_summary_trans(
  function stats2d_summary_trans_inner (line 466) | pub fn stats2d_summary_trans_inner(
  function stats1d_summary_inv_trans (line 487) | pub fn stats1d_summary_inv_trans(
  function stats1d_summary_inv_trans_inner (line 494) | pub fn stats1d_summary_inv_trans_inner(
  function stats2d_summary_inv_trans (line 514) | pub fn stats2d_summary_inv_trans(
  function stats2d_summary_inv_trans_inner (line 521) | pub fn stats2d_summary_inv_trans_inner(
  function stats1d_combine (line 541) | pub fn stats1d_combine(
  function stats1d_combine_inner (line 548) | pub fn stats1d_combine_inner(
  function stats2d_combine (line 575) | pub fn stats2d_combine(
  function stats2d_combine_inner (line 582) | pub fn stats2d_combine_inner(
  function stats1d_final (line 609) | fn stats1d_final(state: Internal, fcinfo: pg_sys::FunctionCallInfo) -> O...
  function stats1d_tf_final (line 622) | fn stats1d_tf_final(
  function stats2d_final (line 642) | fn stats2d_final(state: Internal, fcinfo: pg_sys::FunctionCallInfo) -> O...
  function stats2d_tf_final (line 655) | fn stats2d_tf_final(state: Internal, fcinfo: pg_sys::FunctionCallInfo) -...
  function arrow_stats1d_average (line 930) | pub fn arrow_stats1d_average(sketch: StatsSummary1D, _accessor: Accessor...
  function stats1d_average (line 935) | pub(crate) fn stats1d_average(summary: StatsSummary1D) -> Option<f64> {
  function arrow_stats1d_sum (line 941) | pub fn arrow_stats1d_sum(sketch: StatsSummary1D, _accessor: AccessorSum)...
  function stats1d_sum (line 946) | pub(crate) fn stats1d_sum(summary: StatsSummary1D) -> Option<f64> {
  function arrow_stats1d_stddev (line 952) | pub fn arrow_stats1d_stddev(
  function stats1d_stddev (line 960) | fn stats1d_stddev(
  function arrow_stats1d_variance (line 972) | pub fn arrow_stats1d_variance(
  function stats1d_variance (line 980) | fn stats1d_variance(
  function arrow_stats1d_skewness (line 992) | pub fn arrow_stats1d_skewness(sketch: StatsSummary1D, accessor: Accessor...
  function stats1d_skewness (line 997) | fn stats1d_skewness(summary: StatsSummary1D, method: default!(&str, "'sa...
  function arrow_stats1d_kurtosis (line 1006) | pub fn arrow_stats1d_kurtosis(sketch: StatsSummary1D, accessor: Accessor...
  function stats1d_kurtosis (line 1011) | fn stats1d_kurtosis(summary: StatsSummary1D, method: default!(&str, "'sa...
  function arrow_stats1d_num_vals (line 1020) | pub fn arrow_stats1d_num_vals(sketch: StatsSummary1D, _accessor: Accesso...
  function stats1d_num_vals (line 1025) | fn stats1d_num_vals(summary: StatsSummary1D) -> i64 {
  function arrow_stats2d_average_x (line 1031) | pub fn arrow_stats2d_average_x(sketch: StatsSummary2D, _accessor: Access...
  function stats2d_average_x (line 1036) | fn stats2d_average_x(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_average_y (line 1042) | pub fn arrow_stats2d_average_y(sketch: StatsSummary2D, _accessor: Access...
  function stats2d_average_y (line 1047) | fn stats2d_average_y(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_sum_x (line 1053) | pub fn arrow_stats2d_sum_x(sketch: StatsSummary2D, _accessor: AccessorSu...
  function stats2d_sum_x (line 1058) | fn stats2d_sum_x(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_sum_y (line 1064) | pub fn arrow_stats2d_sum_y(sketch: StatsSummary2D, _accessor: AccessorSu...
  function stats2d_sum_y (line 1069) | fn stats2d_sum_y(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_stdddev_x (line 1075) | pub fn arrow_stats2d_stdddev_x(
  function stats2d_stddev_x (line 1083) | fn stats2d_stddev_x(
  function arrow_stats2d_stdddev_y (line 1095) | pub fn arrow_stats2d_stdddev_y(
  function stats2d_stddev_y (line 1103) | fn stats2d_stddev_y(
  function arrow_stats2d_variance_x (line 1115) | pub fn arrow_stats2d_variance_x(
  function stats2d_variance_x (line 1123) | fn stats2d_variance_x(
  function arrow_stats2d_variance_y (line 1135) | pub fn arrow_stats2d_variance_y(
  function stats2d_variance_y (line 1143) | fn stats2d_variance_y(
  function arrow_stats2d_skewness_x (line 1155) | pub fn arrow_stats2d_skewness_x(
  function stats2d_skewness_x (line 1163) | fn stats2d_skewness_x(summary: StatsSummary2D, method: default!(&str, "'...
  function arrow_stats2d_skewness_y (line 1172) | pub fn arrow_stats2d_skewness_y(
  function stats2d_skewness_y (line 1180) | fn stats2d_skewness_y(summary: StatsSummary2D, method: default!(&str, "'...
  function arrow_stats2d_kurtosis_x (line 1189) | pub fn arrow_stats2d_kurtosis_x(
  function stats2d_kurtosis_x (line 1197) | fn stats2d_kurtosis_x(summary: StatsSummary2D, method: default!(&str, "'...
  function arrow_stats2d_kurtosis_y (line 1206) | pub fn arrow_stats2d_kurtosis_y(
  function stats2d_kurtosis_y (line 1214) | fn stats2d_kurtosis_y(summary: StatsSummary2D, method: default!(&str, "'...
  function arrow_stats2d_num_vals (line 1223) | pub fn arrow_stats2d_num_vals(sketch: StatsSummary2D, _accessor: Accesso...
  function stats2d_num_vals (line 1228) | fn stats2d_num_vals(summary: StatsSummary2D) -> i64 {
  function arrow_stats2d_slope (line 1234) | pub fn arrow_stats2d_slope(sketch: StatsSummary2D, _accessor: AccessorSl...
  function stats2d_slope (line 1239) | fn stats2d_slope(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_corr (line 1245) | pub fn arrow_stats2d_corr(sketch: StatsSummary2D, _accessor: AccessorCor...
  function stats2d_corr (line 1250) | fn stats2d_corr(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_intercept (line 1256) | pub fn arrow_stats2d_intercept(
  function stats2d_intercept (line 1264) | fn stats2d_intercept(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_x_intercept (line 1270) | pub fn arrow_stats2d_x_intercept(
  function stats2d_x_intercept (line 1278) | fn stats2d_x_intercept(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_determination_coeff (line 1284) | pub fn arrow_stats2d_determination_coeff(
  function stats2d_determination_coeff (line 1292) | fn stats2d_determination_coeff(summary: StatsSummary2D) -> Option<f64> {
  function arrow_stats2d_covar (line 1298) | pub fn arrow_stats2d_covar(sketch: Option<StatsSummary2D>, accessor: Acc...
  function stats2d_covar (line 1303) | fn stats2d_covar(
  type Method (line 1317) | pub enum Method {
    method as_str (line 1323) | pub fn as_str(&self) -> &'static str {
  function method_kind (line 1332) | pub fn method_kind(method: &str) -> Method {
  function as_method (line 1341) | pub fn as_method(method: &str) -> Option<Method> {
  constant RUNS (line 1404) | const RUNS: usize = 10;
  constant VALS (line 1405) | const VALS: usize = 10000;
  constant SEED (line 1406) | const SEED: Option<u64> = None;
  constant PRINT_VALS (line 1407) | const PRINT_VALS: bool = false;
  function test_stats_agg_text_io (line 1410) | fn test_stats_agg_text_io() {
  function test_stats_agg_byte_io (line 1583) | fn test_stats_agg_byte_io() {
  function stats_agg_fuzz (line 1612) | fn stats_agg_fuzz() {
  type TestState (line 1621) | struct TestState {
    method new (line 1632) | pub fn new(runs: usize, values: usize, seed: Option<u64>) -> TestState {
    method populate_values (line 1649) | pub fn populate_values(&mut self) {
    method failed_msg (line 1672) | pub fn failed_msg(&self, dump_vals: bool) -> String {
  function check_agg_equivalence (line 1684) | fn check_agg_equivalence(
  function pg1d_aggx (line 1746) | fn pg1d_aggx(agg: &str) -> String {
  function pg1d_aggy (line 1750) | fn pg1d_aggy(agg: &str) -> String {
  function pg2d_agg (line 1754) | fn pg2d_agg(agg: &str) -> String {
  function tk1d_agg (line 1758) | fn tk1d_agg(agg: &str) -> String {
  function tk1d_agg_arg (line 1768) | fn tk1d_agg_arg(agg: &str, arg: &str) -> String {
  function tk2d_agg (line 1778) | fn tk2d_agg(agg: &str) -> String {
  function tk2d_agg_arg (line 1788) | fn tk2d_agg_arg(agg: &str, arg: &str) -> String {
  function pg_moment_pop_query (line 1798) | fn pg_moment_pop_query(moment: i32, column: &str) -> String {
  function pg_moment_samp_query (line 1802) | fn pg_moment_samp_query(moment: i32, column: &str) -> String {
  function test_aggs (line 1806) | fn test_aggs(state: &mut TestState) {
  function stats_agg_rolling (line 2221) | fn stats_agg_rolling() {

FILE: extension/src/tdigest.rs
  function tdigest_trans (line 21) | pub fn tdigest_trans(
  function tdigest_trans_inner (line 29) | pub fn tdigest_trans_inner(
  function tdigest_combine (line 60) | pub fn tdigest_combine(
  function tdigest_combine_inner (line 68) | pub fn tdigest_combine_inner(
  function tdigest_serialize (line 90) | pub fn tdigest_serialize(state: Internal) -> bytea {
  function tdigest_deserialize (line 100) | pub fn tdigest_deserialize(bytes: bytea, _internal: Internal) -> Option<...
  function tdigest_deserialize_inner (line 103) | pub fn tdigest_deserialize_inner(bytes: bytea) -> Inner<tdigest::Builder> {
  method output (line 124) | fn output(&self, buffer: &mut StringInfo) {
  method input (line 134) | fn input(input: &std::ffi::CStr) -> TDigest<'input>
  function to_internal_tdigest (line 152) | fn to_internal_tdigest(&self) -> InternalTDigest {
  function from_internal_tdigest (line 163) | fn from_internal_tdigest(digest: &InternalTDigest) -> TDigest<'static> {
  function tdigest_final (line 186) | fn tdigest_final(state: Internal, fcinfo: pg_sys::FunctionCallInfo) -> O...
  function tdigest_compound_trans (line 223) | pub fn tdigest_compound_trans(
  function tdigest_compound_trans_inner (line 230) | pub fn tdigest_compound_trans_inner(
  function tdigest_compound_combine (line 255) | pub fn tdigest_compound_combine(
  function tdigest_compound_combine_inner (line 264) | pub fn tdigest_compound_combine_inner(
  function tdigest_compound_final (line 290) | fn tdigest_compound_final(
  function tdigest_compound_serialize (line 299) | fn tdigest_compound_serialize(state: Internal, _fcinfo: pg_sys::Function...
  function tdigest_compound_deserialize (line 305) | pub fn tdigest_compound_deserialize(bytes: bytea, _internal: Internal) -...
  function arrow_tdigest_approx_percentile (line 338) | pub fn arrow_tdigest_approx_percentile<'a>(
  function tdigest_quantile (line 347) | pub fn tdigest_quantile<'a>(quantile: f64, digest: TDigest<'a>) -> f64 {
  function arrow_tdigest_approx_rank (line 353) | pub fn arrow_tdigest_approx_rank<'a>(
  function tdigest_quantile_at_value (line 362) | pub fn tdigest_quantile_at_value<'a>(value: f64, digest: TDigest<'a>) ->...
  function arrow_tdigest_num_vals (line 370) | pub fn arrow_tdigest_num_vals<'a>(sketch: TDigest<'a>, _accessor: Access...
  function tdigest_count (line 376) | pub fn tdigest_count<'a>(digest: TDigest<'a>) -> f64 {
  function arrow_tdigest_min (line 382) | pub fn arrow_tdigest_min<'a>(sketch: TDigest<'a>, _accessor: AccessorMin...
  function tdigest_min (line 388) | pub fn tdigest_min<'a>(digest: TDigest<'a>) -> f64 {
  function arrow_tdigest_max (line 394) | pub fn arrow_tdigest_max<'a>(sketch: TDigest<'a>, _accessor: AccessorMax...
  function tdigest_max (line 400) | pub fn tdigest_max<'a>(digest: TDigest<'a>) -> f64 {
  function arrow_tdigest_mean (line 406) | pub fn arrow_tdigest_mean<'a>(sketch: TDigest<'a>, _accessor: AccessorMe...
  function tdigest_mean (line 413) | pub fn tdigest_mean<'a>(digest: TDigest<'a>) -> f64 {
  function tdigest_sum (line 423) | pub fn tdigest_sum(digest: TDigest<'_>) -> f64 {
  function apx_eql (line 435) | fn apx_eql(value: f64, expected: f64, error: f64) {
  function pct_eql (line 443) | fn pct_eql(value: f64, expected: f64, pct_error: f64) {
  function test_tdigest_aggregate (line 448) | fn test_tdigest_aggregate() {
  function test_tdigest_small_count (line 583) | fn test_tdigest_small_count() {
  function serialization_matches (line 605) | fn serialization_matches() {
  function test_tdigest_io (line 618) | fn test_tdigest_io() {
  function test_tdigest_byte_io (line 652) | fn test_tdigest_byte_io() {
  function test_tdigest_compound_agg (line 684) | fn test_tdigest_compound_agg() {

FILE: extension/src/time_vector.rs
  constant FLAG_IS_SORTED (line 28) | pub const FLAG_IS_SORTED: u8 = 0x01;
  constant FLAG_HAS_NULLS (line 29) | pub const FLAG_HAS_NULLS: u8 = 0x01 << 1;
  function num_points (line 46) | pub fn num_points(&self) -> usize {
  function get (line 52) | pub fn get(&self, index: usize) -> Option<TSPoint> {
  function is_sorted (line 61) | pub fn is_sorted(&self) -> bool {
  function has_nulls (line 66) | pub fn has_nulls(&self) -> bool {
  function is_null_val (line 70) | pub fn is_null_val(&self, index: usize) -> bool {
  function clone_owned (line 79) | fn clone_owned(&self) -> Timevector_TSTZ_F64<'static> {
  function iter (line 85) | pub fn iter(&self) -> Iter<'_> {
  function num_vals (line 91) | pub fn num_vals(&self) -> usize {
  type Item (line 97) | type Item = TSPoint;
  type IntoIter (line 98) | type IntoIter = Iter<'a>;
  method into_iter (line 100) | fn into_iter(self) -> Self::IntoIter {
  function unnest (line 112) | pub fn unnest<'a>(
  function timestamptz_to_string (line 124) | pub fn timestamptz_to_string(time: pg_sys::TimestampTz) -> Result<String...
  function to_plotly (line 131) | pub fn to_plotly<'a>(series: Timevector_TSTZ_F64<'a>) -> String {
  function to_text (line 136) | pub fn to_text<'a>(series: Timevector_TSTZ_F64<'a>, format_string: Strin...
  function format_timevector (line 140) | pub fn format_timevector<'a>(series: Timevector_TSTZ_F64<'a>, format_str...
  function arrow_timevector_unnest (line 183) | pub fn arrow_timevector_unnest<'a>(
  function timevector_serialize (line 191) | pub fn timevector_serialize(state: Internal) -> bytea {
  function timevector_deserialize (line 198) | pub fn timevector_deserialize(bytes: bytea, _internal: Internal) -> Opti...
  function timevector_tstz_f64_trans (line 204) | pub fn timevector_tstz_f64_trans(
  function timevector_trans_inner (line 213) | pub fn timevector_trans_inner(
  function timevector_tstz_f64_compound_trans (line 264) | pub fn timevector_tstz_f64_compound_trans<'a>(
  function inner_compound_trans (line 272) | pub fn inner_compound_trans<'b>(
  function timevector_combine (line 291) | pub fn timevector_combine(
  function inner_combine (line 299) | pub fn inner_combine<'a, 'b>(
  function combine (line 314) | pub fn combine(
  function timevector_final (line 366) | pub fn timevector_final(
  function timevector_final_inner (line 373) | pub fn timevector_final_inner<'a>(
  function asof_join (line 439) | pub fn asof_join<'a, 'b>(
  function accessor_asof (line 496) | pub fn accessor_asof<'a>(tv: Timevector_TSTZ_F64<'a>) -> AccessorAsof<'s...
  function arrow_timevector_asof (line 509) | pub fn arrow_timevector_asof<'a>(
  function test_unnest (line 530) | pub fn test_unnest() {
  function test_format_timevector (line 586) | pub fn test_format_timevector() {
  function test_format_timevector_panics_on_infinities (line 665) | pub fn test_format_timevector_panics_on_infinities() {
  function timevector_io (line 707) | pub fn timevector_io() {
  function test_arrow_equivalence (line 774) | pub fn test_arrow_equivalence() {
  function test_rollup (line 825) | pub fn test_rollup() {
  function test_rollup_preserves_nulls_flag (line 902) | fn test_rollup_preserves_nulls_flag() {
  function test_asof_join (line 960) | fn test_asof_join() {
  function test_asof_none (line 1004) | fn test_asof_none() {
  function test_none_asof (line 1024) | fn test_none_asof() {

FILE: extension/src/time_vector/iter.rs
  type Iter (line 5) | pub enum Iter<'a> {
  type Item (line 12) | type Item = TSPoint;
  method next (line 14) | fn next(&mut self) -> Option<Self::Item> {
  method size_hint (line 24) | fn size_hint(&self) -> (usize, Option<usize>) {
  method count (line 30) | fn count(self) -> usize

FILE: extension/src/time_vector/pipeline.rs
  function flatten (line 83) | pub fn flatten<'a>(self) -> UnstableTimevectorPipeline<'a> {
  function from (line 100) | fn from(element: Element<'e>) -> Self {
  function arrow_run_pipeline (line 115) | pub fn arrow_run_pipeline<'a>(
  function run_pipeline_elements (line 122) | pub fn run_pipeline_elements<'s, 'j, 'i>(
  function execute_pipeline_element (line 132) | pub fn execute_pipeline_element<'s>(
  function arrow_add_unstable_element (line 152) | pub fn arrow_add_unstable_element<'p>(
  function pipeline_support (line 167) | pub unsafe fn pipeline_support(input: Internal) -> Internal {
  function pipeline_support_helper (line 181) | pub(crate) unsafe fn pipeline_support_helper(
  function no_change (line 283) | fn no_change() -> pgrx::Internal {
  function lttb_pipeline_element (line 308) | pub fn lttb_pipeline_element(
  function test_pipeline_lttb (line 324) | fn test_pipeline_lttb() {
  function test_pipeline_folding (line 467) | fn test_pipeline_folding() {

FILE: extension/src/time_vector/pipeline/aggregation.rs
  function arrow_run_pipeline_then_stats_agg (line 106) | pub fn arrow_run_pipeline_then_stats_agg<'a>(
  function finalize_with_stats_agg (line 122) | pub fn finalize_with_stats_agg<'e>(
  function pipeline_stats_agg (line 154) | pub fn pipeline_stats_agg() -> toolkit_experimental::PipelineThenStatsAg...
  function pipeline_stats_agg_support (line 164) | pub unsafe fn pipeline_stats_agg_support(input: Internal) -> Internal {
  function sum_pipeline_element (line 192) | pub fn sum_pipeline_element<'a>(
  function arrow_pipeline_then_sum (line 216) | pub fn arrow_pipeline_then_sum<'a>(
  function finalize_with_sum (line 233) | pub fn finalize_with_sum<'e>(
  function pipeline_sum_support (line 260) | pub unsafe fn pipeline_sum_support(input: Internal) -> Internal {
  function average_pipeline_element (line 280) | pub fn average_pipeline_element(
  function arrow_pipeline_then_average (line 308) | pub fn arrow_pipeline_then_average<'a>(
  function finalize_with_average (line 325) | pub fn finalize_with_average<'e>(
  function pipeline_average_support (line 352) | pub unsafe fn pipeline_average_support(input: Internal) -> Internal {
  function num_vals_pipeline_element (line 377) | pub fn num_vals_pipeline_element(
  function arrow_pipeline_then_num_vals (line 405) | pub fn arrow_pipeline_then_num_vals<'a>(
  function finalize_with_num_vals (line 414) | pub fn finalize_with_num_vals<'e>(
  function pipeline_num_vals_support (line 441) | pub unsafe fn pipeline_num_vals_support(input: Internal) -> Internal {
  function arrow_run_pipeline_then_counter_agg (line 463) | pub fn arrow_run_pipeline_then_counter_agg<'a>(
  function finalize_with_counter_agg (line 484) | pub fn finalize_with_counter_agg<'e>(
  function pipeline_counter_agg (line 516) | pub fn pipeline_counter_agg() -> toolkit_experimental::PipelineThenCount...
  function pipeline_counter_agg_support (line 526) | pub unsafe fn pipeline_counter_agg_support(input: Internal) -> Internal {
  function arrow_run_pipeline_then_hyperloglog (line 553) | pub fn arrow_run_pipeline_then_hyperloglog<'a>(
  function finalize_with_hyperloglog (line 569) | pub fn finalize_with_hyperloglog<'e>(
  function pipeline_hyperloglog (line 603) | pub fn pipeline_hyperloglog(size: i32) -> toolkit_experimental::Pipeline...
  function pipeline_hyperloglog_support (line 614) | pub unsafe fn pipeline_hyperloglog_support(input: Internal) -> Internal {
  function arrow_run_pipeline_then_percentile_agg (line 641) | pub fn arrow_run_pipeline_then_percentile_agg<'a>(
  function finalize_with_percentile_agg (line 650) | pub fn finalize_with_percentile_agg<'e>(
  function pipeline_percentile_agg (line 682) | pub fn pipeline_percentile_agg() -> toolkit_experimental::PipelineThenPe...
  function pipeline_percentile_agg_support (line 692) | pub unsafe fn pipeline_percentile_agg_support(input: Internal) -> Intern...
  function test_stats_agg_finalizer (line 724) | fn test_stats_agg_finalizer() {
  function test_stats_agg_pipeline_folding (line 770) | fn test_stats_agg_pipeline_folding() {
  function test_sum_finalizer (line 820) | fn test_sum_finalizer() {
  function test_sum_pipeline_folding (line 863) | fn test_sum_pipeline_folding() {
  function test_average_finalizer (line 913) | fn test_average_finalizer() {
  function test_average_pipeline_folding (line 956) | fn test_average_pipeline_folding() {
  function test_num_vals_finalizer (line 1006) | fn test_num_vals_finalizer() {
  function test_num_vals_pipeline_folding (line 1049) | fn test_num_vals_pipeline_folding() {
  function test_counter_agg_finalizer (line 1099) | fn test_counter_agg_finalizer() {
  function test_hyperloglog_finalizer (line 1180) | fn test_hyperloglog_finalizer() {
  function test_percentile_agg_finalizer (line 1270) | fn test_percentile_agg_finalizer() {
  function test_percentile_agg_pipeline_folding (line 1330) | fn test_percentile_agg_pipeline_folding() {

FILE: extension/src/time_vector/pipeline/arithmetic.rs
  type Function (line 14) | pub enum Function {
  function apply (line 36) | pub fn apply(
  function pipeline_add (line 76) | pub fn pipeline_add(rhs: f64) -> toolkit_experimental::UnstableTimevecto...
  function pipeline_sub (line 86) | pub fn pipeline_sub(rhs: f64) -> toolkit_experimental::UnstableTimevecto...
  function pipeline_mul (line 96) | pub fn pipeline_mul(rhs: f64) -> toolkit_experimental::UnstableTimevecto...
  function pipeline_div (line 106) | pub fn pipeline_div(rhs: f64) -> toolkit_experimental::UnstableTimevecto...
  function pipeline_mod (line 116) | pub fn pipeline_mod(rhs: f64) -> toolkit_experimental::UnstableTimevecto...
  function pipeline_power (line 126) | pub fn pipeline_power(rhs: f64) -> toolkit_experimental::UnstableTimevec...
  function pipeline_log_n (line 141) | pub fn pipeline_log_n(rhs: f64) -> toolkit_experimental::UnstableTimevec...
  function pipeline_abs (line 159) | pub fn pipeline_abs() -> toolkit_experimental::UnstableTimevectorPipelin...
  function pipeline_cbrt (line 173) | pub fn pipeline_cbrt() -> toolkit_experimental::UnstableTimevectorPipeli...
  function pipeline_ceil (line 187) | pub fn pipeline_ceil() -> toolkit_experimental::UnstableTimevectorPipeli...
  function pipeline_floor (line 201) | pub fn pipeline_floor() -> toolkit_experimental::UnstableTimevectorPipel...
  function pipeline_ln (line 210) | pub fn pipeline_ln() -> toolkit_experimental::UnstableTimevectorPipeline...
  function pipeline_log10 (line 224) | pub fn pipeline_log10() -> toolkit_experimental::UnstableTimevectorPipel...
  function pipeline_round (line 238) | pub fn pipeline_round() -> toolkit_experimental::UnstableTimevectorPipel...
  function pipeline_sign (line 252) | pub fn pipeline_sign() -> toolkit_experimental::UnstableTimevectorPipeli...
  function pipeline_sqrt (line 266) | pub fn pipeline_sqrt() -> toolkit_experimental::UnstableTimevectorPipeli...
  function pipeline_trunc (line 280) | pub fn pipeline_trunc() -> toolkit_experimental::UnstableTimevectorPipel...
  function test_simple_arith_binops (line 295) | fn test_simple_arith_binops() {
  function test_simple_arith_unaryops (line 473) | fn test_simple_arith_unaryops() {

FILE: extension/src/time_vector/pipeline/delta.rs
  function delta_pipeline_element (line 14) | pub fn delta_pipeline_element(
  function timevector_delta (line 31) | pub fn timevector_delta<'s>(series: &Timevector_TSTZ_F64<'s>) -> Timevec...
  function test_pipeline_delta (line 69) | fn test_pipeline_delta() {

FILE: extension/src/time_vector/pipeline/expansion.rs
  function pipeline_unnest (line 45) | pub fn pipeline_unnest() -> toolkit_experimental::PipelineThenUnnest<'st...
  function arrow_finalize_with_unnest (line 56) | pub fn arrow_finalize_with_unnest<'p>(
  function arrow_run_pipeline_then_unnest (line 84) | pub fn arrow_run_pipeline_then_unnest<'a>(
  function pipeline_series (line 100) | pub fn pipeline_series() -> toolkit_experimental::PipelineForceMateriali...
  function arrow_force_materialize (line 111) | pub fn arrow_force_materialize<'e>(
  function arrow_run_pipeline_then_materialize (line 139) | pub fn arrow_run_pipeline_then_materialize<'a>(
  function pipeline_materialize_support (line 147) | pub unsafe fn pipeline_materialize_support(input: pgrx::Internal) -> pgr...
  function test_unnest_finalizer (line 176) | fn test_unnest_finalizer() {
  function test_series_finalizer (line 222) | fn test_series_finalizer() {
  function test_force_materialize (line 274) | fn test_force_materialize() {

FILE: extension/src/time_vector/pipeline/fill_to.rs
  type FillToMethod (line 12) | pub enum FillToMethod {
    method fill_point (line 19) | pub fn fill_point(&self, lhs: &TSPoint, rhs: &TSPoint, target_ts: i64)...
  function fillto_pipeline_element (line 58) | pub fn fillto_pipeline_element(
  function fill_to (line 85) | pub fn fill_to<'s>(
  function test_pipeline_fill_to (line 148) | fn test_pipeline_fill_to() {

FILE: extension/src/time_vector/pipeline/filter.rs
  function filter_lambda_pipeline_element (line 12) | pub fn filter_lambda_pipeline_element<'l>(
  function apply_lambda_to (line 26) | pub fn apply_lambda_to<'a>(
  function filter_lambda_over_series (line 51) | pub fn filter_lambda_over_series(
  function test_pipeline_filter_lambda (line 66) | fn test_pipeline_filter_lambda() {

FILE: extension/src/time_vector/pipeline/lambda.rs
  method output (line 35) | fn output(&self, buffer: &mut StringInfo) {
  method input (line 45) | fn input(input: &std::ffi::CStr) -> Self
  function parse (line 66) | pub fn parse(&self) -> Expression {
  function bool_lambda (line 76) | pub fn bool_lambda<'a>(
  function f64_lambda (line 90) | pub fn f64_lambda<'a>(
  function ttz_lambda (line 104) | pub fn ttz_lambda<'a>(
  function interval_lambda (line 119) | pub fn interval_lambda<'a>(
  function point_lambda (line 133) | pub fn point_lambda<'a>(
  function trace_lambda (line 152) | pub fn trace_lambda<'a>(
  type Expression (line 180) | pub struct Expression {
    method ty (line 272) | pub fn ty(&self) -> &Type {
    method ty_is_ts_point (line 276) | pub fn ty_is_ts_point(&self) -> bool {
  type ExpressionSegment (line 186) | pub enum ExpressionSegment {
    method ty (line 282) | pub fn ty(&self) -> &Type {
    method ty_is_ts_point (line 299) | pub fn ty_is_ts_point(&self) -> bool {
    method name (line 308) | pub fn name(&self) -> Cow<'static, str> {
  type UnaryOp (line 200) | pub enum UnaryOp {
  type BinOp (line 206) | pub enum BinOp {
  type Function (line 223) | pub enum Function {
  type Type (line 253) | pub enum Type {
  type Value (line 263) | pub enum Value {
    method bool (line 326) | pub(crate) fn bool(&self) -> bool {
    method float (line 333) | pub(crate) fn float(&self) -> f64 {
    method time (line 340) | pub(crate) fn time(&self) -> i64 {
    method interval (line 347) | pub(crate) fn interval(&self) -> *mut pg_sys::Interval {
    method from (line 428) | fn from(b: bool) -> Self {
    method from (line 434) | fn from(f: f64) -> Self {
  method partial_cmp (line 356) | fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
  method eq (line 397) | fn eq(&self, other: &Self) -> bool {
  function into_data (line 440) | pub fn into_data(self) -> LambdaData<'a> {
  function test_lambda_general (line 562) | fn test_lambda_general() {
  function test_lambda_comparison (line 728) | fn test_lambda_comparison() {
  function test_lambda_function (line 780) | fn test_lambda_function() {
  function test_lambda_unary (line 831) | fn test_lambda_unary() {
  function test_lambda_interval_ops (line 864) | fn test_lambda_interval_ops() {
  function test_lambda_variable (line 895) | fn test_lambda_variable() {

FILE: extension/src/time_vector/pipeline/lambda/executor.rs
  type ExpressionExecutor (line 5) | pub struct ExpressionExecutor<'e, T> {
  function new (line 12) | pub fn new(exprs: &'e Expression) -> Self {
  function with_fn_tracer (line 21) | pub fn with_fn_tracer(exprs: &'e Expression, tracer: T) -> Self
  function with_tracer (line 28) | pub fn with_tracer(exprs: &'e Expression, tracer: T) -> Self {
  function reset (line 36) | pub fn reset(&mut self) {
  function exec (line 42) | pub fn exec(&mut self, value: f64, time: i64) -> Value {
  function exec_expression (line 46) | fn exec_expression(
  function force_var (line 80) | fn force_var(&mut self, i: usize, value: f64, time: i64) -> Value {
  function exec_function (line 90) | fn exec_function(
  function exec_unary_op (line 145) | fn exec_unary_op(
  function exec_binary_op (line 173) | fn exec_binary_op(
  type Tracer (line 361) | pub trait Tracer {
    method trace (line 362) | fn trace(&mut self, expr: &ExpressionSegment, result: &Value);
    method trace (line 366) | fn trace(&mut self, _: &ExpressionSegment, _: &Value) {}
    method trace (line 373) | fn trace(&mut self, expr: &ExpressionSegment, result: &Value) {

FILE: extension/src/time_vector/pipeline/lambda/parser.rs
  type ExpressionParser (line 33) | pub struct ExpressionParser;
  function parse_expression (line 35) | pub fn parse_expression(input: &str) -> Expression {
  function build_expression (line 44) | fn build_expression<'a>(
  function parse_primary (line 64) | fn parse_primary<'a>(
  function build_binary_op (line 215) | fn build_binary_op(
  function parse_timestamptz (line 366) | fn parse_timestamptz(val: &str) -> i64 {
  function parse_interval (line 388) | fn parse_interval(val: &str) -> *mut pg_sys::Interval {

FILE: extension/src/time_vector/pipeline/map.rs
  function map_lambda_pipeline_element (line 19) | pub fn map_lambda_pipeline_element<'l>(
  function apply_lambda_to (line 33) | pub fn apply_lambda_to<'a>(
  function map_lambda_over_series (line 65) | pub fn map_lambda_over_series(
  function map_series_pipeline_element (line 89) | pub fn map_series_pipeline_element(
  function map_series_element (line 95) | pub fn map_series_element<'a>(function: crate::raw::regproc) -> Element<...
  function check_user_function_type (line 105) | pub fn check_user_function_type(function: pg_sys::regproc) {
  function apply_to_series (line 124) | pub fn apply_to_series(
  function map_data_pipeline_element (line 154) | pub fn map_data_pipeline_element(
  function apply_to (line 185) | pub fn apply_to(
  function map_series (line 230) | pub fn map_series(series: &mut Timevector_TSTZ_F64<'_>, mut func: impl F...
  function test_pipeline_map_lambda (line 256) | fn test_pipeline_map_lambda() {
  function test_pipeline_map_lambda2 (line 339) | fn test_pipeline_map_lambda2() {
  function test_pipeline_map_data (line 439) | fn test_pipeline_map_data() {
  function test_pipeline_map_series (line 531) | fn test_pipeline_map_series() {
  function test_pipeline_map_series_failure (line 624) | fn test_pipeline_map_series_failure() {
  function test_pipeline_map_series_null (line 682) | fn test_pipeline_map_series_null() {
  function test_map_io (line 738) | fn test_map_io() {

FILE: extension/src/time_vector/pipeline/sort.rs
  function sort_pipeline_element (line 12) | pub fn sort_pipeline_element<'p>() -> toolkit_experimental::UnstableTime...
  function sort_timevector (line 16) | pub fn sort_timevector(mut series: Timevector_TSTZ_F64<'_>) -> Timevecto...
  function test_pipeline_sort (line 67) | fn test_pipeline_sort() {

FILE: extension/src/time_weighted_average.rs
  method internal (line 42) | fn internal(&self) -> TimeWeightSummaryInternal {
  method interpolate (line 51) | pub(super) fn interpolate(
  type TimeWeightTransState (line 115) | pub struct TimeWeightTransState {
    method push_point (line 123) | fn push_point(&mut self, value: TSPoint) {
    method combine_points (line 127) | fn combine_points(&mut self) {
    method push_summary (line 139) | fn push_summary(&mut self, other: &TimeWeightTransState) {
    method combine_summaries (line 146) | fn combine_summaries(&mut self) {
  function time_weight_trans_serialize (line 158) | pub fn time_weight_trans_serialize(state: Internal) -> bytea {
  function time_weight_trans_deserialize (line 165) | pub fn time_weight_trans_deserialize(bytes: bytea, _internal: Internal) ...
  function time_weight_trans_deserialize_inner (line 168) | pub fn time_weight_trans_deserialize_inner(bytes: bytea) -> Inner<TimeWe...
  function time_weight_trans (line 175) | pub fn time_weight_trans(
  function time_weight_trans_inner (line 185) | pub fn time_weight_trans_inner(
  function time_weight_summary_trans (line 225) | pub fn time_weight_summary_trans(
  function time_weight_summary_trans_inner (line 233) | pub fn time_weight_summary_trans_inner(
  function time_weight_combine (line 264) | pub fn time_weight_combine(
  function time_weight_combine_inner (line 272) | pub fn time_weight_combine_inner(
  function time_weight_final (line 305) | fn time_weight_final(
  function time_weight_final_inner (line 312) | fn time_weight_final_inner(
  function arrow_time_weight_first_val (line 338) | pub fn arrow_time_weight_first_val(sketch: TimeWeightSummary, _accessor:...
  function time_weight_first_val (line 343) | fn time_weight_first_val(summary: TimeWeightSummary) -> f64 {
  function arrow_time_weight_last_val (line 349) | pub fn arrow_time_weight_last_val(sketch: TimeWeightSummary, _accessor: ...
  function time_weight_last_val (line 354) | fn time_weight_last_val(summary: TimeWeightSummary) -> f64 {
  function arrow_time_weight_first_time (line 360) | pub fn arrow_time_weight_first_time(
  function time_weight_first_time (line 368) | fn time_weight_first_time(summary: TimeWeightSummary) -> crate::raw::Tim...
  function arrow_time_weight_last_time (line 374) | pub fn arrow_time_weight_last_time(
  function time_weight_last_time (line 382) | fn time_weight_last_time(summary: TimeWeightSummary) -> crate::raw::Time...
  function arrow_time_weighted_average_average (line 423) | pub fn arrow_time_weighted_average_average(
  function arrow_time_weighted_average_integral (line 432) | pub fn arrow_time_weighted_average_integral(
  function time_weighted_average_average (line 443) | pub fn time_weighted_average_average(tws: Option<TimeWeightSummary>) -> ...
  function time_weighted_average_integral (line 461) | pub fn time_weighted_average_integral(
  function interpolate (line 476) | fn interpolate(
  function time_weighted_average_interpolated_average (line 493) | pub fn time_weighted_average_interpolated_average(
  function arrow_time_weighted_average_interpolated_average (line 506) | pub fn arrow_time_weighted_average_interpolated_average(
  function time_weighted_average_interpolated_integral (line 531) | pub fn time_weighted_average_interpolated_integral(
  function arrow_time_weighted_average_interpolated_integral (line 545) | pub fn arrow_time_weighted_average_interpolated_integral(
  function test_time_weight_aggregate (line 599) | fn test_time_weight_aggregate() {
  function test_time_weight_io (line 712) | fn test_time_weight_io() {
  function test_time_weight_byte_io (line 796) | fn test_time_weight_byte_io() {
  function test_time_weight_interpolation (line 866) | fn test_time_weight_interpolation() {
  function test_locf_interpolation_to_null (line 1004) | fn test_locf_interpolation_to_null() {

FILE: extension/src/time_weighted_average/accessors.rs
  function time_weight_interpolated_average_accessor (line 26) | fn time_weight_interpolated_average_accessor(
  function time_weight_interpolated_integral_accessor (line 75) | fn time_weight_interpolated_integral_accessor(

FILE: extension/src/type_builder.rs
  type CachedDatum (line 2) | pub enum CachedDatum<'r> {
  method eq (line 9) | fn eq(&self, _: &Self) -> bool {
  function deserialize (line 19) | fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
  type SerializationType (line 713) | pub enum SerializationType {

FILE: extension/src/uddsketch.rs
  function uddsketch_trans (line 21) | pub fn uddsketch_trans(
  function uddsketch_trans_inner (line 31) | pub fn uddsketch_trans_inner(
  constant PERCENTILE_AGG_DEFAULT_SIZE (line 55) | const PERCENTILE_AGG_DEFAULT_SIZE: u32 = 200;
  constant PERCENTILE_AGG_DEFAULT_ERROR (line 56) | const PERCENTILE_AGG_DEFAULT_ERROR: f64 = 0.001;
  function percentile_agg_trans (line 61) | pub fn percentile_agg_trans(
  function percentile_agg_trans_inner (line 69) | pub fn percentile_agg_trans_inner(
  function uddsketch_combine (line 80) | pub fn uddsketch_combine(
  function uddsketch_combine_inner (line 87) | pub fn uddsketch_combine_inner(
  function uddsketch_serialize (line 109) | pub fn uddsketch_serialize(state: Internal) -> bytea {
  function uddsketch_deserialize (line 115) | pub fn uddsketch_deserialize(bytes: bytea, _internal: Internal) -> Optio...
  function uddsketch_deserialize_inner (line 118) | pub fn uddsketch_deserialize_inner(bytes: bytea) -> Inner<UddSketchInter...
  type SerializedUddSketch (line 124) | struct SerializedUddSketch {
    method from (line 135) | fn from(sketch: &UddSketchInternal) -> Self {
    method keys (line 168) | fn keys(&self) -> impl Iterator<Item = SketchHashKey> + '_ {
    method counts (line 176) | fn counts(&self) -> impl Iterator<Item = u64> + '_ {
  method from (line 150) | fn from(sketch: SerializedUddSketch) -> Self {
  type ReadableUddSketch (line 208) | struct ReadableUddSketch {
    method from (line 220) | fn from(sketch: &UddSketch<'_>) -> Self {
  function from (line 235) | fn from(sketch: &'a ReadableUddSketch) -> Self {
  method output (line 271) | fn output(&self, buffer: &mut StringInfo) {
  method input (line 281) | fn input(input: &std::ffi::CStr) -> Self
  function keys (line 294) | fn keys(&self) -> impl Iterator<Item = SketchHashKey> + '_ {
  function counts (line 303) | fn counts(&self) -> impl Iterator<Item = u64> + '_ {
  function metadata (line 312) | fn metadata(&self) -> UDDSketchMetadata {
  function to_uddsketch (line 324) | fn to_uddsketch(&self) -> UddSketchInternal {
  function from_internal (line 328) | fn from_internal(state: &UddSketchInternal) -> Self {
  function from_iter (line 362) | fn from_iter<T: IntoIterator<Item = f64>>(iter: T) -> Self {
  function uddsketch_final (line 376) | fn uddsketch_final(
  function uddsketch_final_inner (line 382) | fn uddsketch_final_inner(
  type CompressedBuckets (line 399) | struct CompressedBuckets {
  function compress_buckets (line 407) | fn compress_buckets(buckets: impl Iterator<Item = (SketchHashKey, u64)>)...
  function decompress_keys (line 440) | fn de
Condensed preview — 195 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,535K chars).
[
  {
    "path": ".cargo/config",
    "chars": 122,
    "preview": "[build]\n# Postgres symbols won't ve available until runtime\nrustflags = [\"-C\", \"link-args=-Wl,-undefined,dynamic_lookup\""
  },
  {
    "path": ".dockerignore",
    "chars": 109,
    "preview": "**/*.iml\n**/*.o\n**/.DS_Store\n.editorconfig\n.idea\n.vscode\n.vsls.json\n.git\nold-versions\ntarget\ntarget-analyzer\n"
  },
  {
    "path": ".git-blame-ignore-revs",
    "chars": 130,
    "preview": "# Merge and parent commit for cargo fmt changes\nb7433344f90b142094e73e84c332385498db9335\n8b50127c9e4bad1696a68a800ce1ef0"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.md",
    "chars": 945,
    "preview": "---\nname: Bug Report\nabout: Something is not working as expected\ntitle: ''\nlabels: bug\nassignees: ''\n\n---\n\n**Relevant sy"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-request.md",
    "chars": 614,
    "preview": "---\nname: Feature Request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: feature-request\nassignees: ''\n\n---\n\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-stabilization.md",
    "chars": 1102,
    "preview": "---\nname: Feature Stabilization\nabout: Checklist of tasks to move a feature out of experimental\ntitle: ''\nlabels: ''\nass"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/proposed-feature.md",
    "chars": 975,
    "preview": "---\nname: Proposed Feature\nabout: Propose a solution to a problem or wishlist item\ntitle: ''\nlabels: proposed-feature\nas"
  },
  {
    "path": ".github/workflows/add-to-bugs-board.yml",
    "chars": 2232,
    "preview": "name: Add bugs to bugs project\n\n\"on\":\n  issues:\n    types: [opened, labeled]\n  issue_comment:\n    types: [created, edite"
  },
  {
    "path": ".github/workflows/ci.yml",
    "chars": 7715,
    "preview": "name: CI\non:\n  pull_request:\n  push:\n    branches:\n    - main\n    - staging\n    - trying\n  schedule:\n    # TimescaleDB i"
  },
  {
    "path": ".github/workflows/ci_image_build.yml",
    "chars": 1630,
    "preview": "name: Build CI Image\n\non:\n  pull_request:\n    paths:\n      - 'docker/ci/**'\n      - '.github/workflows/ci_image_build.ym"
  },
  {
    "path": ".github/workflows/clippy_rustfmt.yml",
    "chars": 1476,
    "preview": "name: Clippy and rustfmt\non:\n  pull_request:\n  push:\n    branches:\n    - main\n    - staging\n    - trying\n  workflow_disp"
  },
  {
    "path": ".github/workflows/dependency-updates.yml",
    "chars": 3059,
    "preview": "name: Dependency Updates\non:\n  schedule:\n    # Run on the 1st of every month at 9:00 AM UTC\n    - cron: '0 9 1 * *'\n  wo"
  },
  {
    "path": ".github/workflows/packaging.yml",
    "chars": 794,
    "preview": "# Trigger package workflows on release tagging\nname: Build packages\non:\n  push:\n    tags:\n    - \"[0-9]+.[0-9]+.[0-9]+\"\n "
  },
  {
    "path": ".github/workflows/release.yml",
    "chars": 1211,
    "preview": "name: Release\non:\n  workflow_dispatch:\n    inputs:\n      version:\n        description: 'New version number for release'\n"
  },
  {
    "path": ".github/workflows/report_packaging_failures.yml",
    "chars": 1440,
    "preview": "name: Report Build Package Failures\non:\n  workflow_run:\n    workflows: [Build packages, Build CI Image, CI]\n    types: ["
  },
  {
    "path": ".gitignore",
    "chars": 98,
    "preview": ".DS_Store\n/.idea\n/.vscode\n/.vsls.json\n/old-versions\n/target\n*.iml\n/target-analyzer\n/.editorconfig\n"
  },
  {
    "path": "Cargo.toml",
    "chars": 228,
    "preview": "[workspace]\nresolver = \"2\"\n\nmembers = [\n    \"crates/t-digest-lib\",\n    \"extension\",\n    \"tools/post-install\",\n    \"tools"
  },
  {
    "path": "Changelog.md",
    "chars": 35315,
    "preview": "# Toolkit Changelog\n\n## Process for updating this changelog\n\nThis changelog should be updated as part of a PR if the wor"
  },
  {
    "path": "LICENSE",
    "chars": 361,
    "preview": "Unless otherwise Source code in this repository, and any binaries built from\nthis source code, in whole or in part, are "
  },
  {
    "path": "NOTICE",
    "chars": 458,
    "preview": "TimescaleDB-Toolkit (TM)\n\nCopyright (c) 2021-2024  Timescale, Inc. All Rights Reserved.\n\nUnless otherwise stated, source"
  },
  {
    "path": "Readme.md",
    "chars": 4726,
    "preview": "[![CI](https://github.com/timescale/timescaledb-toolkit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github."
  },
  {
    "path": "crates/aggregate_builder/Cargo.toml",
    "chars": 259,
    "preview": "[package]\nname = \"aggregate_builder\"\nversion = \"0.1.0\"\nedition = \"2018\"\n\n[lib]\nproc-macro = true\n\n[dependencies]\nsyn = {"
  },
  {
    "path": "crates/aggregate_builder/Readme.md",
    "chars": 10111,
    "preview": "# Aggregate Builder #\n\nLibrary for building Postgres [aggregate functions](https://www.postgresql.org/docs/current/xaggr"
  },
  {
    "path": "crates/aggregate_builder/src/lib.rs",
    "chars": 35023,
    "preview": "use std::borrow::Cow;\n\nuse proc_macro::TokenStream;\n\nuse proc_macro2::{Span, TokenStream as TokenStream2};\n\nuse quote::{"
  },
  {
    "path": "crates/asap/Cargo.toml",
    "chars": 75,
    "preview": "[package]\nname = \"asap\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\n"
  },
  {
    "path": "crates/asap/src/fft.rs",
    "chars": 7715,
    "preview": "// based on https://github.com/stanford-futuredata/ASAP/blob/8b39db4bc92590cbe5b44ddace9b7bb1d677248b/ASAP-optimized.js\n"
  },
  {
    "path": "crates/asap/src/lib.rs",
    "chars": 25079,
    "preview": "// based on https://github.com/stanford-futuredata/ASAP/blob/8b39db4bc92590cbe5b44ddace9b7bb1d677248b/ASAP-optimized.js\n"
  },
  {
    "path": "crates/count-min-sketch/Cargo.toml",
    "chars": 150,
    "preview": "[package]\nname = \"countminsketch\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\nrand = \"0.8.4\"\nserde = { version = "
  },
  {
    "path": "crates/count-min-sketch/src/lib.rs",
    "chars": 8652,
    "preview": "//! Count-Min Sketch implementation in Rust\n//!\n//! Based on the paper:\n//! <http://dimacs.rutgers.edu/~graham/pubs/pape"
  },
  {
    "path": "crates/count-min-sketch/tests/lib.rs",
    "chars": 1510,
    "preview": "use countminsketch::CountMinSketch;\n\n#[test]\nfn empty_sketch() {\n    let cms = CountMinSketch::with_dim(1, 1);\n    asser"
  },
  {
    "path": "crates/counter-agg/Cargo.toml",
    "chars": 461,
    "preview": "[package]\nname = \"counter-agg\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc."
  },
  {
    "path": "crates/counter-agg/src/lib.rs",
    "chars": 14769,
    "preview": "use serde::{Deserialize, Serialize};\nuse stats_agg::{stats2d::StatsSummary2D, XYPair};\nuse std::fmt;\nuse tspoint::TSPoin"
  },
  {
    "path": "crates/counter-agg/src/range.rs",
    "chars": 8930,
    "preview": "use serde::{Deserialize, Serialize};\nuse std::cmp::{max, min};\n\n// we always store ranges as half open, inclusive on lef"
  },
  {
    "path": "crates/counter-agg/src/tests.rs",
    "chars": 21169,
    "preview": "// TODO Move to ../tests/lib.rs\n\nuse crate::range::I64Range;\nuse crate::*;\nuse approx::assert_relative_eq;\nfn to_micro(t"
  },
  {
    "path": "crates/encodings/Cargo.toml",
    "chars": 140,
    "preview": "[package]\nname = \"encodings\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\n\n[dev-dependencies]\nquickcheck = \"1\"\nqui"
  },
  {
    "path": "crates/encodings/src/lib.rs",
    "chars": 9118,
    "preview": "pub mod delta {\n    use crate::zigzag;\n\n    pub fn i64_decoder() -> impl FnMut(i64) -> i64 {\n        let mut prev = 0i64"
  },
  {
    "path": "crates/flat_serialize/Readme.md",
    "chars": 3184,
    "preview": "# Flat Serialize #\n\nA cannonicalization of write-to-pointer style serialization. You write a\ndefinition describing the l"
  },
  {
    "path": "crates/flat_serialize/example_generated.rs",
    "chars": 82731,
    "preview": "#![allow(unused_imports)]\nuse crate as flat_serialize;\n#[derive(Clone, Debug)]\npub struct Basic<'input> {\n    pub header"
  },
  {
    "path": "crates/flat_serialize/flat_serialize/Cargo.toml",
    "chars": 228,
    "preview": "[package]\nname = \"flat_serialize\"\nversion = \"0.1.0\"\nauthors = [\"Joshua Lockerman\"]\nedition = \"2021\"\n\n[dependencies]\norde"
  },
  {
    "path": "crates/flat_serialize/flat_serialize/src/lib.rs",
    "chars": 54207,
    "preview": "use std::{\n    fmt,\n    marker::PhantomData,\n    mem::{align_of, size_of, MaybeUninit},\n    slice,\n};\n#[derive(Debug)]\np"
  },
  {
    "path": "crates/flat_serialize/flat_serialize_macro/Cargo.toml",
    "chars": 306,
    "preview": "[package]\nname = \"flat_serialize_macro\"\nversion = \"0.1.0\"\nauthors = [\"Joshua Lockerman\"]\nedition = \"2021\"\n\n[lib]\nproc-ma"
  },
  {
    "path": "crates/flat_serialize/flat_serialize_macro/src/lib.rs",
    "chars": 53972,
    "preview": "use proc_macro::TokenStream;\n\nuse proc_macro2::TokenStream as TokenStream2;\n\nuse quote::{quote, quote_spanned};\n\nuse syn"
  },
  {
    "path": "crates/flat_serialize/flat_serialize_macro/src/parser.rs",
    "chars": 13755,
    "preview": "use std::{collections::HashSet, ops::Deref};\n\nuse proc_macro2::TokenStream as TokenStream2;\n\nuse syn::{\n    braced,\n    "
  },
  {
    "path": "crates/hyperloglogplusplus/Cargo.toml",
    "chars": 293,
    "preview": "[package]\nname = \"hyperloglogplusplus\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\nserde = { version = \"1.0\", fea"
  },
  {
    "path": "crates/hyperloglogplusplus/src/dense.rs",
    "chars": 15929,
    "preview": "use crate::hyperloglog_data::{\n    BIAS_DATA_OFFSET, BIAS_DATA_VEC, RAW_ESTIMATE_DATA_OFFSET, RAW_ESTIMATE_DATA_VEC,\n   "
  },
  {
    "path": "crates/hyperloglogplusplus/src/hyperloglog_data.rs",
    "chars": 104216,
    "preview": "// based on https://github.com/crepererum/pdatastructs.rs/blob/e4f49e6462187700b9a12e8301df9a72a0c6e58c/src/hyperloglog_"
  },
  {
    "path": "crates/hyperloglogplusplus/src/lib.rs",
    "chars": 12423,
    "preview": "#[cfg(test)]\nextern crate quickcheck;\n#[cfg(test)]\n#[macro_use(quickcheck)]\nextern crate quickcheck_macros;\n\nuse std::{\n"
  },
  {
    "path": "crates/hyperloglogplusplus/src/registers.rs",
    "chars": 10601,
    "preview": "use std::{borrow::Cow, convert::TryInto, debug_assert};\n\n/// array of 6bit registers, of power-of-2 size\n// 24 is the LC"
  },
  {
    "path": "crates/hyperloglogplusplus/src/sparse/varint.rs",
    "chars": 3467,
    "preview": "use std::borrow::Cow;\n\nuse encodings::{delta, prefix_varint};\n\nuse super::Encoded;\n\npub fn decompression_iter<'a>(\n    C"
  },
  {
    "path": "crates/hyperloglogplusplus/src/sparse.rs",
    "chars": 12412,
    "preview": "use std::{\n    cmp::{\n        min,\n        Ordering::{Equal, Greater, Less},\n    },\n    collections::HashSet,\n};\n\nuse cr"
  },
  {
    "path": "crates/scripting-utilities/Readme.md",
    "chars": 621,
    "preview": "# Scripting Utilities #\n\nSmall helper crates for writing scripty code, such as found in tools.\nContains code that's _jus"
  },
  {
    "path": "crates/scripting-utilities/control_file_reader/Cargo.toml",
    "chars": 188,
    "preview": "[package]\nname = \"control_file_reader\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at http"
  },
  {
    "path": "crates/scripting-utilities/control_file_reader/src/lib.rs",
    "chars": 2100,
    "preview": "/// Code to extract info from `timescaledb_toolkit.control`\n/// This crate exists so we have a single source of truth fo"
  },
  {
    "path": "crates/scripting-utilities/postgres_connection_configuration/Cargo.toml",
    "chars": 202,
    "preview": "[package]\nname = \"postgres_connection_configuration\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their defin"
  },
  {
    "path": "crates/scripting-utilities/postgres_connection_configuration/src/lib.rs",
    "chars": 1637,
    "preview": "/// Config utility for connecting to multiple DBs in the same cluster\n// JOSH - I'm not sure if this really warrants a c"
  },
  {
    "path": "crates/stats-agg/Cargo.toml",
    "chars": 520,
    "preview": "[package]\nname = \"stats_agg\"\nversion = \"0.1.0\"\nauthors = [\"davidkohn88 <david@timescale.com>\"]\nedition = \"2021\"\n\n# See m"
  },
  {
    "path": "crates/stats-agg/src/lib.rs",
    "chars": 8336,
    "preview": "// stats is a small statistical regression lib that implements the Youngs-Cramer algorithm and is based on the Postgres "
  },
  {
    "path": "crates/stats-agg/src/stats1d.rs",
    "chars": 18790,
    "preview": "use crate::{m3, m4, FloatLike, StatsError, TwoFloat, INV_FLOATING_ERROR_THRESHOLD};\nuse serde::{Deserialize, Serialize};"
  },
  {
    "path": "crates/stats-agg/src/stats2d/stats2d_flat_serialize.rs",
    "chars": 16503,
    "preview": "use super::*;\n\n// expanded from FlatSerializable derive macro and made to work right with generic arg\n#[allow(warnings, "
  },
  {
    "path": "crates/stats-agg/src/stats2d.rs",
    "chars": 33929,
    "preview": "// 2D stats are based on the Youngs-Cramer implementation in PG here:\n// https://github.com/postgres/postgres/blob/472e5"
  },
  {
    "path": "crates/t-digest/Cargo.toml",
    "chars": 437,
    "preview": "[package]\nname = \"tdigest\"\nversion = \"0.2.2\"\nedition = \"2021\"\n# based on: https://github.com/MnO2/t-digest\"\n\n[dependenci"
  },
  {
    "path": "crates/t-digest/src/lib.rs",
    "chars": 34363,
    "preview": "// Based on https://github.com/MnO2/t-digest/blob/master/src/lib.rs\n// as of commit 66d7c19d32c1547daa628f1d9f12178a686b"
  },
  {
    "path": "crates/t-digest-lib/Cargo.toml",
    "chars": 214,
    "preview": "[package]\nname = \"tdigest-lib\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[lib]\nname = \"timescaledb_toolkit_tdigest\"\ncrate-type"
  },
  {
    "path": "crates/t-digest-lib/src/lib.rs",
    "chars": 1873,
    "preview": "// There is no safety here:  it's all in the hands of the caller, bless their heart.\n#![allow(clippy::missing_safety_doc"
  },
  {
    "path": "crates/time-weighted-average/Cargo.toml",
    "chars": 448,
    "preview": "[package]\nname = \"time_weighted_average\"\nversion = \"0.1.0\"\nauthors = [\"David Kohn <david@timescale.com>\"]\nedition = \"202"
  },
  {
    "path": "crates/time-weighted-average/src/lib.rs",
    "chars": 29748,
    "preview": "use serde::{Deserialize, Serialize};\nuse tspoint::TSPoint;\n\nuse flat_serialize_macro::FlatSerializable;\n\n#[derive(Clone,"
  },
  {
    "path": "crates/tspoint/Cargo.toml",
    "chars": 357,
    "preview": "[package]\nname = \"tspoint\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust"
  },
  {
    "path": "crates/tspoint/src/lib.rs",
    "chars": 6064,
    "preview": "use serde::{ser::SerializeStruct, Deserialize, Serialize};\n\nuse flat_serialize_macro::FlatSerializable;\n\nuse std::ffi::C"
  },
  {
    "path": "crates/udd-sketch/Cargo.toml",
    "chars": 362,
    "preview": "[package]\nname = \"uddsketch\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.ru"
  },
  {
    "path": "crates/udd-sketch/src/lib.rs",
    "chars": 33822,
    "preview": "//! UDDSketch implementation in rust.\n//! Based on the paper: https://arxiv.org/abs/2004.08604\n\nuse serde::{Deserialize,"
  },
  {
    "path": "docker/README.md",
    "chars": 2481,
    "preview": "# Docker images\n\nTo speed up builds, we are using a set of pre-build docker images and\nthe Docker files for that is pres"
  },
  {
    "path": "docker/ci/Dockerfile",
    "chars": 1009,
    "preview": "ARG ARCH\nARG OS_NAME\nARG OS_VERSION\n\n# Without DockerKit, this doesn't work, even though documentation suggests it shoul"
  },
  {
    "path": "docker/ci/setup.sh",
    "chars": 9031,
    "preview": "#!/bin/sh\n\n# TODO rename to tools/setup - this is useful even for developer setup (add Mac/brew support)\n\nset -ex\n\nif [ "
  },
  {
    "path": "docs/README.md",
    "chars": 3652,
    "preview": "# TimescaleDB Toolkit Documentation\n---\nThe TimescaleDB Toolkit project contains a number of utilities for working with "
  },
  {
    "path": "docs/asap.md",
    "chars": 6737,
    "preview": "# ASAP Smoothing [<sup><mark>experimental</mark></sup>](/docs/README.md#tag-notes)\n\n> [Description](#asap-description)<b"
  },
  {
    "path": "docs/client.md",
    "chars": 6095,
    "preview": "# Client-side aggregation [<sup><mark>experimental</mark></sup>](/docs/README.md#tag-notes)\n\n- Current status: prototype"
  },
  {
    "path": "docs/counter_agg.md",
    "chars": 40468,
    "preview": "# Counter Aggregates\n\n> [Description](#counter-agg-description)<br>\n> [Example Usage](#counter-agg-examples)<br>\n> [API]"
  },
  {
    "path": "docs/examples/tdigest.c",
    "chars": 2195,
    "preview": "// cc -o tdigest tdigest.c $CARGO_TARGET_DIR/$PROFILE/libtimescaledb_toolkit_tdigest.a -lm -lpthread -ldl\n\n// Sample pro"
  },
  {
    "path": "docs/examples/tdigest.py",
    "chars": 3152,
    "preview": "import ctypes\nimport os\n\n_cdll = ctypes.CDLL(os.path.join(\n    os.getenv('CARGO_TARGET_DIR', 'target'),\n    os.getenv('P"
  },
  {
    "path": "docs/gauge_agg.md",
    "chars": 2159,
    "preview": "# Gauge Aggregates [<sup><mark>experimental</mark></sup>](/docs/README.md#tag-notes)\n\nA gauge is a metric similar to a c"
  },
  {
    "path": "docs/hyperloglog.md",
    "chars": 5369,
    "preview": "# Hyperloglog\n\n> [Description](#hyperloglog-description)<br>\n> [Details](#hyperloglog-details)<br>\n> [API](#hyperloglog-"
  },
  {
    "path": "docs/lttb.md",
    "chars": 5009,
    "preview": "# Largest Triangle Three Buckets\n\n> [Description](#description)<br>\n> [Example](#example)<br>\n> [API](#api)\n\n## Descript"
  },
  {
    "path": "docs/ordered-aggregates.md",
    "chars": 6364,
    "preview": "# Implementing aggregates that require ordered inputs\n\nPostgreSQL has a couple different ways of dealing with aggregates"
  },
  {
    "path": "docs/percentile_approximation.md",
    "chars": 31882,
    "preview": "# Approximate Percentiles\n> [Why To Use Approximate Percentiles](#why-use)<br>\n> [API](#percentile-approx-api) <br>\n> [A"
  },
  {
    "path": "docs/release.md",
    "chars": 10342,
    "preview": "# Release and build procedures\n\nWe build the timescaledb_toolkit extension using Cargo, but we have many\nhigher-level ta"
  },
  {
    "path": "docs/rolling_average_api_working.md",
    "chars": 7024,
    "preview": "\n# Info dump on rolling average APIs #\n\nRolling averages are currently nasty to do with with timescaledb (user complaint"
  },
  {
    "path": "docs/state_agg.md",
    "chars": 19909,
    "preview": "# State Aggregation [<sup><mark>experimental</mark></sup>](/docs/README.md#tag-notes)\n\n# Test table\n\nExamples below are "
  },
  {
    "path": "docs/stats_agg.md",
    "chars": 3239,
    "preview": "# Statistical Aggregates\n\n## Common 1-D Statistical Functions\n- `average`\n- `sum`\n- `num_vals`\n- `stddev`(population and"
  },
  {
    "path": "docs/tdigest.md",
    "chars": 15270,
    "preview": "# T-Digest\n\n> [Description](#tdigest-description)<br>\n> [Details](#tdigest-details)<br>\n> [Example](#tdigest-example)<br"
  },
  {
    "path": "docs/template.md",
    "chars": 1924,
    "preview": "# FEATURE-NAME [<sup><mark>experimental</mark></sup>](/docs/README.md#tag-notes)\n\n- Current status: ( prototype | experi"
  },
  {
    "path": "docs/test_caggs.md",
    "chars": 4536,
    "preview": "# Continuous aggregation tests\n\nThis document serves as a driver for allowing our doctester to verify the behavior of so"
  },
  {
    "path": "docs/test_candlestick_agg.md",
    "chars": 3441,
    "preview": "# Candlestick Continuous Aggregation Tests\n\n## Setup table\n```SQL,non-transactional,ignore-output\nSET TIME ZONE 'UTC';\nC"
  },
  {
    "path": "docs/time_weighted_average.md",
    "chars": 16446,
    "preview": "# Time Weighted Average\n\n> [Description](#time-weighted-average-description)<br>\n> [Example Usage](time-weighted-average"
  },
  {
    "path": "docs/timeseries.md",
    "chars": 6425,
    "preview": "# Timevector\n\n> [Description](#timevector-description)<br>\n> [Timevector Pipelines](#timevector-pipelines)<br>\n> [Exampl"
  },
  {
    "path": "docs/timeseries_pipeline_elements.md",
    "chars": 10175,
    "preview": "# Timevector Pipelines [<sup><mark>experimental</mark></sup>](/docs/README.md#tag-notes)\n\n> [Description](#timevector-pi"
  },
  {
    "path": "docs/two-step_aggregation.md",
    "chars": 11140,
    "preview": "# Two-Step Aggregation - What It Is and Why We Use It\n\n## What is a Two-Step Aggregate <a id=\"two-step-description\"></a>"
  },
  {
    "path": "docs/uddsketch.md",
    "chars": 16584,
    "preview": "# UddSketch\n\n> [Description](#uddsketch-description)<br>\n> [Details](#uddsketch-details)<br>\n> [Example](#uddsketch-exam"
  },
  {
    "path": "extension/.gitignore",
    "chars": 71,
    "preview": ".DS_Store\n.idea/\n.vscode/\n/target\n*.iml\n**/*.rs.bk\nsql/*.generated.sql\n"
  },
  {
    "path": "extension/Cargo.toml",
    "chars": 1917,
    "preview": "[package]\nname = \"timescaledb_toolkit\"\nversion = \"1.22.0-dev\"\nedition = \"2021\"\n\n[[bin]]\nname = \"pgrx_embed_timescaledb_t"
  },
  {
    "path": "extension/src/accessors/tests.rs",
    "chars": 441,
    "preview": "use pgrx::*;\n\nuse super::accessor;\n\n//use crate::{accessor, build};\n\n// TODO don't require that trailing comma\naccessor!"
  },
  {
    "path": "extension/src/accessors.rs",
    "chars": 15225,
    "preview": "use pgrx::*;\n\nuse counter_agg::range::I64Range;\n\nuse crate::{build, flatten, pg_type, ron_inout_funcs};\n\nmacro_rules! ac"
  },
  {
    "path": "extension/src/aggregate_builder_tests.rs",
    "chars": 7269,
    "preview": "// Tests for `aggregate_builder::aggregate`. This can't be in the\n// aggregate_builder crate because it requires too muc"
  },
  {
    "path": "extension/src/aggregate_utils.rs",
    "chars": 1282,
    "preview": "use std::ptr::null_mut;\n\nuse pgrx::pg_sys;\n\n// TODO move to func_utils once there are enough function to warrant one\npub"
  },
  {
    "path": "extension/src/asap.rs",
    "chars": 12968,
    "preview": "use asap::*;\nuse pgrx::*;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    aggregate_utils::in_aggregate_context,\n"
  },
  {
    "path": "extension/src/bin/pgrx_embed.rs",
    "chars": 86,
    "preview": "// so we can support upgrading pgrx\n#![allow(unexpected_cfgs)]\n::pgrx::pgrx_embed!();\n"
  },
  {
    "path": "extension/src/candlestick.rs",
    "chars": 38220,
    "preview": "use pgrx::*;\nuse serde::{Deserialize, Serialize};\n\nuse crate::accessors::{\n    AccessorClose, AccessorCloseTime, Accesso"
  },
  {
    "path": "extension/src/counter_agg/accessors.rs",
    "chars": 2642,
    "preview": "use pgrx::*;\n\nuse crate::{\n    counter_agg::{CounterSummary, CounterSummaryData, MetricSummary},\n    datum_utils::interv"
  },
  {
    "path": "extension/src/counter_agg.rs",
    "chars": 63559,
    "preview": "use serde::{Deserialize, Serialize};\n\nuse pgrx::*;\n\nuse crate::{\n    accessors::{\n        AccessorCorr, AccessorCounterZ"
  },
  {
    "path": "extension/src/countminsketch.rs",
    "chars": 9369,
    "preview": "use pgrx::*;\n\nuse aggregate_builder::aggregate;\nuse countminsketch::{CountMinHashFn, CountMinSketch as CountMinSketchInt"
  },
  {
    "path": "extension/src/datum_utils.rs",
    "chars": 24151,
    "preview": "use std::{\n    fmt,\n    hash::{BuildHasher, Hasher},\n    mem::size_of,\n    slice,\n};\n\nuse serde::{\n    de::{SeqAccess, V"
  },
  {
    "path": "extension/src/duration.rs",
    "chars": 3285,
    "preview": "//! Utilities for working with durations. Parsing of duration units is intended to match how\n//! PostgreSQL parses durat"
  },
  {
    "path": "extension/src/frequency.rs",
    "chars": 88784,
    "preview": "//! Based on the paper: https://cs.ucsb.edu/sites/default/files/documents/2005-23.pdf\n\nuse std::fmt;\n\nuse pgrx::{\n    it"
  },
  {
    "path": "extension/src/gauge_agg.rs",
    "chars": 40007,
    "preview": "use pgrx::*;\n\nuse serde::{Deserialize, Serialize};\n\nuse counter_agg::{range::I64Range, GaugeSummaryBuilder, MetricSummar"
  },
  {
    "path": "extension/src/heartbeat_agg/accessors.rs",
    "chars": 3874,
    "preview": "use pgrx::*;\n\nuse crate::{\n    flatten,\n    heartbeat_agg::{HeartbeatAgg, HeartbeatAggData},\n    pg_type, ron_inout_func"
  },
  {
    "path": "extension/src/heartbeat_agg.rs",
    "chars": 63409,
    "preview": "use pgrx::iter::TableIterator;\nuse pgrx::*;\n\nuse crate::{\n    accessors::{\n        AccessorDeadRanges, AccessorDowntime,"
  },
  {
    "path": "extension/src/hyperloglog.rs",
    "chars": 35429,
    "preview": "#![allow(clippy::identity_op)] // clippy gets confused by flat_serialize! enums\n\nuse std::{\n    convert::TryInto,\n    ha"
  },
  {
    "path": "extension/src/lib.rs",
    "chars": 2149,
    "preview": "// so we can support upgrading pgrx\n#![allow(unexpected_cfgs)]\n// so we can allow very new Clippy lints\n#![allow(unknown"
  },
  {
    "path": "extension/src/lttb.rs",
    "chars": 22545,
    "preview": "use pgrx::*;\nuse std::borrow::Cow;\n\nuse crate::{\n    aggregate_utils::in_aggregate_context,\n    flatten,\n    palloc::{In"
  },
  {
    "path": "extension/src/nmost/max_by_float.rs",
    "chars": 6369,
    "preview": "use pgrx::{iter::TableIterator, *};\n\nuse crate::nmost::max_float::*;\nuse crate::nmost::*;\n\nuse crate::{\n    build, flatt"
  },
  {
    "path": "extension/src/nmost/max_by_int.rs",
    "chars": 5995,
    "preview": "use pgrx::{iter::TableIterator, *};\n\nuse crate::nmost::max_int::*;\nuse crate::nmost::*;\n\nuse crate::{\n    build, flatten"
  },
  {
    "path": "extension/src/nmost/max_by_time.rs",
    "chars": 6699,
    "preview": "use pgrx::{iter::TableIterator, *};\n\nuse crate::nmost::max_time::*;\nuse crate::nmost::*;\n\nuse crate::{\n    build, flatte"
  },
  {
    "path": "extension/src/nmost/max_float.rs",
    "chars": 8546,
    "preview": "use pgrx::{iter::SetOfIterator, *};\n\nuse crate::nmost::*;\n\nuse crate::{\n    accessors::{AccessorIntoArray, AccessorIntoV"
  },
  {
    "path": "extension/src/nmost/max_int.rs",
    "chars": 7681,
    "preview": "use pgrx::{iter::SetOfIterator, *};\n\nuse crate::nmost::*;\n\nuse crate::{\n    accessors::{AccessorIntoArray, AccessorIntoV"
  },
  {
    "path": "extension/src/nmost/max_time.rs",
    "chars": 9090,
    "preview": "use pgrx::{iter::SetOfIterator, *};\n\nuse crate::nmost::*;\n\nuse crate::{\n    accessors::{AccessorIntoArray, AccessorIntoV"
  },
  {
    "path": "extension/src/nmost/min_by_float.rs",
    "chars": 6275,
    "preview": "use pgrx::{iter::TableIterator, *};\n\nuse crate::nmost::min_float::*;\nuse crate::nmost::*;\n\nuse crate::{\n    build, flatt"
  },
  {
    "path": "extension/src/nmost/min_by_int.rs",
    "chars": 5659,
    "preview": "use pgrx::{iter::TableIterator, *};\n\nuse crate::nmost::min_int::*;\nuse crate::nmost::*;\n\nuse crate::{\n    build, flatten"
  },
  {
    "path": "extension/src/nmost/min_by_time.rs",
    "chars": 6347,
    "preview": "use pgrx::{iter::TableIterator, *};\n\nuse crate::nmost::min_time::*;\nuse crate::nmost::*;\n\nuse crate::{\n    build, flatte"
  },
  {
    "path": "extension/src/nmost/min_float.rs",
    "chars": 8355,
    "preview": "use pgrx::{iter::SetOfIterator, *};\n\nuse crate::nmost::*;\n\nuse crate::{\n    accessors::{AccessorIntoArray, AccessorIntoV"
  },
  {
    "path": "extension/src/nmost/min_int.rs",
    "chars": 7390,
    "preview": "use pgrx::{iter::SetOfIterator, *};\n\nuse crate::nmost::*;\n\nuse crate::{\n    accessors::{AccessorIntoArray, AccessorIntoV"
  },
  {
    "path": "extension/src/nmost/min_time.rs",
    "chars": 8779,
    "preview": "use pgrx::{iter::SetOfIterator, *};\n\nuse crate::nmost::*;\n\nuse crate::{\n    accessors::{AccessorIntoArray, AccessorIntoV"
  },
  {
    "path": "extension/src/nmost.rs",
    "chars": 8470,
    "preview": "use pgrx::*;\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    aggregate_utils::in_aggregate_context,\n    datum_ut"
  },
  {
    "path": "extension/src/palloc.rs",
    "chars": 5227,
    "preview": "use std::{\n    alloc::{GlobalAlloc, Layout, System},\n    ops::{Deref, DerefMut},\n    ptr::NonNull,\n};\n\nuse pgrx::*;\n\npub"
  },
  {
    "path": "extension/src/pg_any_element.rs",
    "chars": 4020,
    "preview": "use std::{\n    collections::HashMap,\n    hash::{Hash, Hasher},\n    mem::size_of,\n};\n\nuse pgrx::*;\n\nuse pg_sys::{Datum, O"
  },
  {
    "path": "extension/src/range.rs",
    "chars": 5088,
    "preview": "use counter_agg::range::I64Range;\nuse pgrx::{extension_sql, pg_sys};\nuse serde::{Deserialize, Serialize};\nuse std::conve"
  },
  {
    "path": "extension/src/raw.rs",
    "chars": 5836,
    "preview": "#![allow(non_camel_case_types)]\n\nuse pgrx::*;\nuse pgrx_sql_entity_graph::metadata::{\n    ArgumentError, Returns, Returns"
  },
  {
    "path": "extension/src/saturation.rs",
    "chars": 2645,
    "preview": "//! Saturating Math for Integers\n\nuse pgrx::*;\n\n/// Computes x+y, saturating at the numeric bounds instead of overflowin"
  },
  {
    "path": "extension/src/serialization/collations.rs",
    "chars": 11903,
    "preview": "use std::{\n    ffi::{CStr, CString},\n    mem::{align_of, size_of, MaybeUninit},\n    os::raw::c_char,\n    slice,\n};\n\nuse "
  },
  {
    "path": "extension/src/serialization/functions.rs",
    "chars": 2229,
    "preview": "use std::{\n    ffi::{CStr, CString},\n    mem::{align_of, size_of, MaybeUninit},\n    os::raw::c_char,\n    slice,\n};\n\nuse "
  },
  {
    "path": "extension/src/serialization/types.rs",
    "chars": 14199,
    "preview": "use std::{\n    ffi::{CStr, CString},\n    mem::{align_of, size_of, MaybeUninit},\n    slice,\n};\n\nuse flat_serialize::{impl"
  },
  {
    "path": "extension/src/serialization.rs",
    "chars": 5670,
    "preview": "pub use self::collations::PgCollationId;\npub use self::functions::PgProcId;\npub use self::types::ShortTypeId;\nuse std::{"
  },
  {
    "path": "extension/src/stabilization_info.rs",
    "chars": 53294,
    "preview": "// This file serves as the canonical database for what functionality Toolkit has\n// stabilized and in which version they"
  },
  {
    "path": "extension/src/stabilization_tests.rs",
    "chars": 9765,
    "preview": "#[cfg(any(test, feature = \"pg_test\"))]\nuse pgrx::*;\n\n#[cfg(any(test, feature = \"pg_test\"))]\n#[pg_schema]\nmod tests {\n   "
  },
  {
    "path": "extension/src/state_aggregate/accessors.rs",
    "chars": 9782,
    "preview": "use crate::{\n    datum_utils::interval_to_ms,\n    pg_type,\n    raw::{Interval, TimestampTz},\n    ron_inout_funcs,\n    st"
  },
  {
    "path": "extension/src/state_aggregate/rollup.rs",
    "chars": 19356,
    "preview": "use super::{toolkit_experimental::*, *};\nuse crate::{\n    aggregate_utils::in_aggregate_context,\n    palloc::{InternalAs"
  },
  {
    "path": "extension/src/state_aggregate.rs",
    "chars": 92202,
    "preview": "//! SELECT duration_in('STOPPED', states) as run_time, duration_in('ERROR', states) as error_time FROM (\n//!   SELECT co"
  },
  {
    "path": "extension/src/stats_agg.rs",
    "chars": 73610,
    "preview": "use pgrx::*;\n\nuse crate::{\n    accessors::{\n        AccessorAverage, AccessorAverageX, AccessorAverageY, AccessorCorr, A"
  },
  {
    "path": "extension/src/tdigest.rs",
    "chars": 25850,
    "preview": "use std::{convert::TryInto, ops::Deref};\n\nuse pgrx::*;\n\nuse crate::{\n    accessors::{\n        AccessorApproxPercentile, "
  },
  {
    "path": "extension/src/time_vector/iter.rs",
    "chars": 850,
    "preview": "use tspoint::TSPoint;\n\nuse Iter::*;\n\npub enum Iter<'a> {\n    Slice {\n        iter: flat_serialize::Iter<'a, 'a, TSPoint>"
  },
  {
    "path": "extension/src/time_vector/pipeline/aggregation.rs",
    "chars": 47740,
    "preview": "use std::mem::take;\n\nuse pgrx::*;\n\nuse counter_agg::CounterSummaryBuilder;\n\nuse super::*;\n\nuse crate::{\n    accessors::{"
  },
  {
    "path": "extension/src/time_vector/pipeline/arithmetic.rs",
    "chars": 22252,
    "preview": "use pgrx::*;\n\nuse super::*;\n\nuse super::Element::Arithmetic;\nuse Function::*;\n\n#[derive(\n    Debug, Copy, Clone, flat_se"
  },
  {
    "path": "extension/src/time_vector/pipeline/delta.rs",
    "chars": 4421,
    "preview": "use pgrx::*;\n\nuse super::*;\n\nuse crate::accessors::AccessorDelta;\n\n// TODO is (immutable, parallel_safe) correct?\n#[pg_e"
  },
  {
    "path": "extension/src/time_vector/pipeline/expansion.rs",
    "chars": 11212,
    "preview": "use std::mem::take;\n\nuse pgrx::{iter::TableIterator, *};\n\nuse super::*;\n\nuse crate::{build, pg_type, ron_inout_funcs};\n\n"
  },
  {
    "path": "extension/src/time_vector/pipeline/fill_to.rs",
    "chars": 10461,
    "preview": "use pgrx::*;\n\nuse flat_serialize_macro::FlatSerializable;\n\nuse serde::{Deserialize, Serialize};\n\nuse super::*;\n\n// TODO:"
  },
  {
    "path": "extension/src/time_vector/pipeline/filter.rs",
    "chars": 4540,
    "preview": "use pgrx::*;\n\nuse super::*;\n\n// TODO is (stable, parallel_safe) correct?\n#[pg_extern(\n    immutable,\n    parallel_safe,\n"
  },
  {
    "path": "extension/src/time_vector/pipeline/lambda/executor.rs",
    "chars": 12482,
    "preview": "use pgrx::*;\n\nuse super::*;\n\npub struct ExpressionExecutor<'e, T> {\n    exprs: &'e Expression,\n    var_vals: Vec<Option<"
  },
  {
    "path": "extension/src/time_vector/pipeline/lambda/lambda_expr.pest",
    "chars": 1254,
    "preview": "calculation = _{ SOI ~ let_expr ~ EOI }\nlet_expr = { (\"let\" ~ var ~ \"=\" ~ tuple ~ \";\")* ~ tuple }\ntuple = { binops ~ (\","
  },
  {
    "path": "extension/src/time_vector/pipeline/lambda/parser.rs",
    "chars": 16377,
    "preview": "use std::{collections::HashMap, ffi::CString};\n\nuse pgrx::*;\n\nuse super::*;\n\nuse pest::{\n    iterators::{Pair, Pairs},\n "
  },
  {
    "path": "extension/src/time_vector/pipeline/lambda.rs",
    "chars": 30610,
    "preview": "use std::borrow::Cow;\n\nuse pgrx::{\n    iter::{SetOfIterator, TableIterator},\n    *,\n};\n\nuse super::*;\n\npub use executor:"
  },
  {
    "path": "extension/src/time_vector/pipeline/map.rs",
    "chars": 29599,
    "preview": "use std::{\n    mem::{self, ManuallyDrop, MaybeUninit},\n    ptr,\n};\n\nuse pgrx::*;\n\nuse super::*;\n\nuse crate::serializatio"
  },
  {
    "path": "extension/src/time_vector/pipeline/sort.rs",
    "chars": 5096,
    "preview": "use pgrx::*;\n\nuse super::*;\n\n// TODO is (immutable, parallel_safe) correct?\n#[pg_extern(\n    immutable,\n    parallel_saf"
  },
  {
    "path": "extension/src/time_vector/pipeline.rs",
    "chars": 18249,
    "preview": "mod aggregation;\nmod arithmetic;\nmod delta;\nmod expansion;\nmod fill_to;\nmod filter;\nmod lambda;\nmod map;\nmod sort;\n\nuse "
  },
  {
    "path": "extension/src/time_vector.rs",
    "chars": 36277,
    "preview": "#![allow(clippy::identity_op)] // clippy gets confused by pg_type! enums\n\nuse crate::pg_sys::timestamptz_to_str;\nuse cor"
  },
  {
    "path": "extension/src/time_weighted_average/accessors.rs",
    "chars": 3520,
    "preview": "use pgrx::*;\n\nuse crate::time_weighted_average::DurationUnit;\nuse crate::{\n    datum_utils::interval_to_ms,\n    flatten,"
  },
  {
    "path": "extension/src/time_weighted_average.rs",
    "chars": 38463,
    "preview": "#![allow(non_camel_case_types)]\n\nuse pgrx::*;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    accessors::{\n      "
  },
  {
    "path": "extension/src/type_builder.rs",
    "chars": 32954,
    "preview": "#[derive(Copy, Clone, Debug, serde::Serialize)]\npub enum CachedDatum<'r> {\n    None,\n    FromInput(&'r [u8]),\n    Flatte"
  },
  {
    "path": "extension/src/uddsketch.rs",
    "chars": 42413,
    "preview": "use pgrx::*;\n\nuse encodings::{delta, prefix_varint};\n\nuse uddsketch::{SketchHashKey, UDDSketch as UddSketchInternal, UDD"
  },
  {
    "path": "extension/src/utilities.rs",
    "chars": 8445,
    "preview": "use crate::raw::TimestampTz;\nuse pgrx::prelude::*;\n\n#[pg_extern(\n    name = \"generate_periodic_normal_series\",\n    schem"
  },
  {
    "path": "extension/timescaledb_toolkit.control",
    "chars": 565,
    "preview": "comment = 'Library of analytical hyperfunctions, time-series pipelining, and other SQL utilities'\ndefault_version = '@CA"
  },
  {
    "path": "tests/update/candlestick.md",
    "chars": 1362,
    "preview": "# Candlestick Tests\n\n## Get candlestick values from tick data\n\n\n```sql,creation,min-toolkit-version=1.14.0\nCREATE TABLE "
  },
  {
    "path": "tests/update/heartbeat.md",
    "chars": 1890,
    "preview": "# Candlestick Tests\n\n## Get candlestick values from tick data\n\n\n```sql,creation,min-toolkit-version=1.15.0\nCREATE TABLE "
  },
  {
    "path": "tests/update/original_update_tests.md",
    "chars": 1166,
    "preview": "# Original Update Tests\n\n\n\n```sql,creation,min-toolkit-version=1.4.0\nCREATE TABLE test_data(ts timestamptz, val DOUBLE P"
  },
  {
    "path": "tests/update/state_agg.md",
    "chars": 942,
    "preview": "# `state_agg` tests\n\n```sql,creation,min-toolkit-version=1.15.0\nCREATE TABLE states_test(ts TIMESTAMPTZ, state TEXT);\nIN"
  },
  {
    "path": "tests/update/time-vector.md",
    "chars": 1510,
    "preview": "# Time Vector Tests\n\n```sql,creation\nCREATE TABLE time_vector_data(time TIMESTAMPTZ, value DOUBLE PRECISION);\nINSERT INT"
  },
  {
    "path": "tests/update/time-weighted-average.md",
    "chars": 1845,
    "preview": "# Time Weighted Average Tests\n\n## Test integral and interpolated integral\n\n```sql,creation,min-toolkit-version=1.15.0\nCR"
  },
  {
    "path": "tools/build",
    "chars": 3947,
    "preview": "#!/bin/sh\n\nset -ex\n\nprint() {\n    printf '%s\\n' \"$*\"\n}\n\ndie() {\n    st=${?:-0}\n    if [ $st -eq 0 ]; then\n        st=2\n "
  },
  {
    "path": "tools/dependencies.sh",
    "chars": 1029,
    "preview": "# Dependency configuration\n# Ideally, all dependencies would be specified in just one place.\n# Exceptions:\n# - crate dep"
  },
  {
    "path": "tools/install-timescaledb",
    "chars": 252,
    "preview": "#!/bin/sh\ngit clone \"$2\" timescaledb\ncd timescaledb\ngit switch --detach \"$3\"\nmkdir build\ncd build\n# this overwrites the "
  },
  {
    "path": "tools/post-install/Cargo.toml",
    "chars": 114,
    "preview": "[package]\nname = \"post-install\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\nxshell = \"0.1.17\"\nwalkdir = \"2\""
  },
  {
    "path": "tools/post-install/src/main.rs",
    "chars": 10366,
    "preview": "#![allow(unexpected_cfgs)]\n\nuse std::{\n    env,\n    fs::{self, File},\n    io::{BufRead, BufReader, BufWriter, Write},\n  "
  },
  {
    "path": "tools/post-install/src/update_script.rs",
    "chars": 22283,
    "preview": "use std::{\n    collections::HashSet,\n    io::{BufRead, Write},\n    iter::Peekable,\n};\n\nuse crate::PushLine;\n\nstatic ALTE"
  },
  {
    "path": "tools/release",
    "chars": 12081,
    "preview": "#!/bin/sh\n\n# This script automates release creation:\n# 1. Create release branch from target commit.\n# 1a. Validate conte"
  },
  {
    "path": "tools/sql-doctester/Cargo.toml",
    "chars": 432,
    "preview": "[package]\nname = \"sql-doctester\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\nbytecount = \"0.6.2\"\nclap = { version"
  },
  {
    "path": "tools/sql-doctester/Readme.md",
    "chars": 4210,
    "preview": "## SQL Doctester ##\n\nTest SQL code in Markdown files.\n\nThis tool looks through a directory for markdown files containing"
  },
  {
    "path": "tools/sql-doctester/src/main.rs",
    "chars": 3520,
    "preview": "use std::{\n    collections::HashMap,\n    ffi::OsStr,\n    fs,\n    io::{self, Write},\n    process::exit,\n};\n\nuse colored::"
  },
  {
    "path": "tools/sql-doctester/src/parser.rs",
    "chars": 12218,
    "preview": "use std::collections::HashMap;\n\nuse pulldown_cmark::{\n    CodeBlockKind::Fenced,\n    CowStr, Event, Parser,\n    Tag::{Co"
  },
  {
    "path": "tools/sql-doctester/src/runner.rs",
    "chars": 13656,
    "preview": "use rayon::{iter::ParallelIterator, prelude::*};\n\nuse std::{borrow::Cow, error::Error, fmt};\n\nuse colored::Colorize;\n\nus"
  },
  {
    "path": "tools/sql-doctester/src/startup.sql",
    "chars": 292,
    "preview": "CREATE EXTENSION timescaledb;\nCREATE EXTENSION timescaledb_toolkit;\nSET SESSION TIMEZONE TO 'UTC';\n\n-- utility for gener"
  },
  {
    "path": "tools/testbin",
    "chars": 8769,
    "preview": "#!/bin/sh\n\n# This script automates binary upgrade testing.\n\n# Sample run:\n# OS_NAME=ubuntu OS_VERSION=24.04 tools/testbi"
  },
  {
    "path": "tools/update-tester/Cargo.toml",
    "chars": 596,
    "preview": "[package]\nname = \"update-tester\"\nversion = \"0.3.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://do"
  },
  {
    "path": "tools/update-tester/Readme.md",
    "chars": 1870,
    "preview": "# Update Tester #\n\nRuns update tests. It'll install every version of the extension marked as\n`upgradeable_from` in `time"
  },
  {
    "path": "tools/update-tester/src/installer.rs",
    "chars": 6830,
    "preview": "#![allow(unexpected_cfgs)]\n\nuse std::{collections::HashSet, path::Path};\n\nuse colored::Colorize;\nuse semver::Version;\nus"
  },
  {
    "path": "tools/update-tester/src/main.rs",
    "chars": 13637,
    "preview": "use std::{\n    collections::HashSet,\n    io::{self, Write},\n    path::Path,\n    process,\n};\n\nuse clap::Arg;\nuse clap::Co"
  },
  {
    "path": "tools/update-tester/src/parser.rs",
    "chars": 16294,
    "preview": "use std::{collections::HashMap, ffi::OsStr, fs, path::Path};\n\nuse pulldown_cmark::{\n    CodeBlockKind::Fenced,\n    CowSt"
  },
  {
    "path": "tools/update-tester/src/testrunner/stabilization.rs",
    "chars": 1776,
    "preview": "pub use stabilization_info::*;\n\n#[path = \"../../../../extension/src/stabilization_info.rs\"]\nmod stabilization_info;\n\n#[m"
  },
  {
    "path": "tools/update-tester/src/testrunner.rs",
    "chars": 24491,
    "preview": "use colored::Colorize;\nuse semver::{BuildMetadata, Prerelease, Version};\n\nuse crate::{defer, parser, Deferred};\nuse post"
  }
]

About this extraction

This page contains the full source code of the timescale/timescaledb-toolkit GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 195 files (2.3 MB), approximately 618.6k tokens, and a symbol index with 2504 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.
