Full Code of buraksezer/olricdb for AI

master 792bb41fc287 cached
202 files
981.6 KB
279.8k tokens
1519 symbols
1 requests
Download .txt
Showing preview only (1,040K chars total). Download the full file or copy to clipboard to get everything.
Repository: buraksezer/olricdb
Branch: master
Commit: 792bb41fc287
Files: 202
Total size: 981.6 KB

Directory structure:
gitextract_bkk5kycd/

├── .github/
│   ├── FUNDING.yml
│   └── workflows/
│       ├── ci.yml
│       ├── codeql-analysis.yml
│       └── golangci-lint.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── auth.go
├── auth_test.go
├── client.go
├── cluster.go
├── cluster_client.go
├── cluster_client_test.go
├── cluster_iterator.go
├── cluster_iterator_test.go
├── cluster_test.go
├── cmd/
│   └── olric-server/
│       ├── main.go
│       ├── olric-server-local.yaml
│       └── server/
│           └── server.go
├── config/
│   ├── authentication.go
│   ├── client.go
│   ├── config.go
│   ├── config_test.go
│   ├── dmap.go
│   ├── dmap_test.go
│   ├── dmaps.go
│   ├── engine.go
│   ├── engine_test.go
│   ├── internal/
│   │   └── loader/
│   │       └── loader.go
│   ├── load.go
│   ├── memberlist.go
│   ├── network.go
│   └── network_test.go
├── docker/
│   ├── README.md
│   ├── docker-compose.yml
│   ├── nginx.conf
│   └── olric-server-consul.yaml
├── embedded_client.go
├── embedded_client_test.go
├── embedded_iterator.go
├── embedded_iterator_test.go
├── events/
│   ├── cluster_events.go
│   └── cluster_events_test.go
├── get_response.go
├── get_response_test.go
├── go.mod
├── go.sum
├── hasher/
│   └── hasher.go
├── integration_test.go
├── internal/
│   ├── bufpool/
│   │   ├── bufpool.go
│   │   └── bufpool_test.go
│   ├── checkpoint/
│   │   ├── checkpoint.go
│   │   └── checkpoint_test.go
│   ├── cluster/
│   │   ├── balancer/
│   │   │   ├── balancer.go
│   │   │   └── balancer_test.go
│   │   ├── partitions/
│   │   │   ├── fragment.go
│   │   │   ├── hkey.go
│   │   │   ├── hkey_test.go
│   │   │   ├── partition.go
│   │   │   ├── partition_test.go
│   │   │   ├── partitions.go
│   │   │   └── partitions_test.go
│   │   └── routingtable/
│   │       ├── callback.go
│   │       ├── callback_test.go
│   │       ├── discovery.go
│   │       ├── discovery_test.go
│   │       ├── distribute.go
│   │       ├── distribute_test.go
│   │       ├── events.go
│   │       ├── events_test.go
│   │       ├── handlers.go
│   │       ├── left_over_data.go
│   │       ├── left_over_data_test.go
│   │       ├── members.go
│   │       ├── members_test.go
│   │       ├── operations.go
│   │       ├── routingtable.go
│   │       ├── routingtable_test.go
│   │       └── update.go
│   ├── discovery/
│   │   ├── delegate.go
│   │   ├── discovery.go
│   │   ├── discovery_test.go
│   │   ├── events.go
│   │   ├── member.go
│   │   └── member_test.go
│   ├── dmap/
│   │   ├── atomic.go
│   │   ├── atomic_handlers.go
│   │   ├── atomic_test.go
│   │   ├── balance.go
│   │   ├── balance_test.go
│   │   ├── compaction.go
│   │   ├── compaction_test.go
│   │   ├── config.go
│   │   ├── config_test.go
│   │   ├── delete.go
│   │   ├── delete_handlers.go
│   │   ├── delete_test.go
│   │   ├── destroy.go
│   │   ├── destroy_handlers.go
│   │   ├── destroy_test.go
│   │   ├── dmap.go
│   │   ├── dmap_test.go
│   │   ├── env.go
│   │   ├── eviction.go
│   │   ├── eviction_test.go
│   │   ├── expire.go
│   │   ├── expire_handlers.go
│   │   ├── expire_test.go
│   │   ├── fragment.go
│   │   ├── fragment_test.go
│   │   ├── get.go
│   │   ├── get_handlers.go
│   │   ├── get_test.go
│   │   ├── handlers.go
│   │   ├── janitor.go
│   │   ├── lock.go
│   │   ├── lock_handlers.go
│   │   ├── lock_test.go
│   │   ├── put.go
│   │   ├── put_handlers.go
│   │   ├── put_test.go
│   │   ├── scan_handlers.go
│   │   ├── scan_test.go
│   │   ├── service.go
│   │   ├── service_test.go
│   │   └── stats_test.go
│   ├── environment/
│   │   ├── environment.go
│   │   └── environment_test.go
│   ├── locker/
│   │   ├── locker.go
│   │   └── locker_test.go
│   ├── protocol/
│   │   ├── cluster.go
│   │   ├── cluster_test.go
│   │   ├── commands.go
│   │   ├── dmap.go
│   │   ├── dmap_test.go
│   │   ├── errors.go
│   │   ├── errors_test.go
│   │   ├── pubsub.go
│   │   ├── pubsub_test.go
│   │   ├── system.go
│   │   └── system_test.go
│   ├── pubsub/
│   │   ├── handlers.go
│   │   ├── handlers_test.go
│   │   ├── pubsub.go
│   │   ├── pubsub_test.go
│   │   └── service.go
│   ├── ramblock/
│   │   ├── compaction.go
│   │   ├── compaction_test.go
│   │   ├── entry/
│   │   │   ├── entry.go
│   │   │   └── entry_test.go
│   │   ├── ramblock.go
│   │   ├── ramblock_test.go
│   │   ├── table/
│   │   │   ├── pack.go
│   │   │   ├── pack_test.go
│   │   │   ├── table.go
│   │   │   └── table_test.go
│   │   └── transport.go
│   ├── resp/
│   │   ├── encoder.go
│   │   ├── encoder_test.go
│   │   └── scan.go
│   ├── roundrobin/
│   │   ├── round_robin.go
│   │   └── round_robin_test.go
│   ├── server/
│   │   ├── client.go
│   │   ├── client_test.go
│   │   ├── handler.go
│   │   ├── handler_test.go
│   │   ├── mux.go
│   │   ├── mux_test.go
│   │   ├── server.go
│   │   └── server_test.go
│   ├── service/
│   │   └── service.go
│   ├── stats/
│   │   ├── stats.go
│   │   └── stats_test.go
│   ├── testcluster/
│   │   └── testcluster.go
│   ├── testutil/
│   │   ├── mockfragment/
│   │   │   └── mockfragment.go
│   │   └── testutil.go
│   └── util/
│       ├── safe.go
│       ├── strconv.go
│       └── unsafe.go
├── olric-server-docker.yaml
├── olric.go
├── olric_test.go
├── ping.go
├── ping_test.go
├── pipeline.go
├── pipeline_test.go
├── pkg/
│   ├── flog/
│   │   └── flog.go
│   ├── neterrors/
│   │   └── errors.go
│   ├── service_discovery/
│   │   └── service_discovery.go
│   └── storage/
│       ├── config.go
│       ├── config_test.go
│       ├── engine.go
│       ├── entry.go
│       └── stats.go
├── pubsub.go
├── pubsub_test.go
├── stats/
│   ├── stats.go
│   └── stats_test.go
├── stats.go
└── stats_test.go

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/FUNDING.yml
================================================
# These are supported funding model platforms

github: buraksezer
patreon: # not used anymore
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']


================================================
FILE: .github/workflows/ci.yml
================================================
name: Unit & Integration tests

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  test:
    strategy:
      # Default is true, cancels jobs for other platforms in the matrix if one fails
      fail-fast: false
      matrix:
        os: [ ubuntu-latest ]
        go: [ '1.23', '1.24' ]

    runs-on: ${{ matrix.os }}

    steps:
    - name: Install Go
      uses: actions/setup-go@v4
      with:
        go-version: ${{ matrix.go }}

    - name: Checkout code
      uses: actions/checkout@v3

    - name: Print Go version and environment
      id: vars
      run: |
        printf "Using go at: $(which go)\n"
        printf "Go version: $(go version)\n"
        printf "\n\nGo environment:\n\n"
        go env
        printf "\n\nSystem environment:\n\n"
        env
        # Calculate the short SHA1 hash of the git commit.
        # The `::set-output` workflow command is deprecated and disabled by
        # GitHub; write step outputs to the $GITHUB_OUTPUT file instead.
        echo "short_sha=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"
        echo "go_cache=$(go env GOCACHE)" >> "$GITHUB_OUTPUT"

    - name: Cache the build cache
      uses: actions/cache@v4
      with:
        path: ${{ steps.vars.outputs.go_cache }}
        key: ${{ runner.os }}-${{ matrix.go }}-go-ci-${{ hashFiles('**/go.sum') }}
        restore-keys: |
          ${{ runner.os }}-${{ matrix.go }}-go-ci

    - name: Install dependencies
      run: |
        go mod download

    - name: Run tests
      run: make test


================================================
FILE: .github/workflows/codeql-analysis.yml
================================================
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '28 20 * * 0'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
    # checkout@v2 runs on the deprecated Node 12 runtime; v4 matches the
    # codeql-action@v3 steps below and is the currently supported release.
    - name: Checkout repository
      uses: actions/checkout@v4

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v3
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v3

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v3


================================================
FILE: .github/workflows/golangci-lint.yml
================================================
name: golangci-lint
on:
  push:
    branches:
      - master
  pull_request:

permissions:
  contents: read
  # Optional: allow read access to pull request. Use with `only-new-issues` option.
  # pull-requests: read

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # `stable` resolves to the latest stable Go release at run time.
      - uses: actions/setup-go@v5
        with:
          go-version: stable
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v7
        with:
          # Pins the golangci-lint binary (v2.x line), not the action version.
          version: v2.0

================================================
FILE: .gitignore
================================================
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib

# Vim creates this
*.swp

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

# GoLand creates this
.idea/

# OSX creates this
.DS_Store

.claude/
CLAUDE.md


================================================
FILE: Dockerfile
================================================
# Build stage: compile olric-server from the repository sources.
# NOTE(review): `golang:latest` is unpinned; consider pinning a version for
# reproducible builds (CI tests Go 1.23/1.24).
FROM golang:latest AS build
WORKDIR /src/
COPY . /src/
RUN go mod download
RUN CGO_ENABLED=1 go build -ldflags="-s -w" -o /usr/bin/olric-server /src/cmd/olric-server

# Runtime stage: distroless keeps the image minimal; base-debian12 provides
# glibc for the CGO-enabled binary built above.
FROM gcr.io/distroless/base-debian12
COPY --from=build /usr/bin/olric-server /usr/bin/olric-server
COPY --from=build /src/olric-server-docker.yaml /etc/olric-server.yaml

# Ports exposed by the container (see olric-server-docker.yaml / README).
EXPOSE 3320 3322
ENTRYPOINT ["/usr/bin/olric-server", "-c", "/etc/olric-server.yaml"]


================================================
FILE: LICENSE
================================================

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2018-2025 The Olric Authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: Makefile
================================================
.PHONY: test
test:
	go test -p 1 ./...

.PHONY: test-quick
test-quick:
	go test -p 1 -count=1 ./...

.PHONY: test-race
test-race:
	go test -p 1 -race ./...

.PHONY: format
format:
	go fmt ./...

.PHONY: prepare-merge
prepare-merge: format test

.PHONY: ci
ci: test

# The target was declared `.PHONY: ci-quick` but defined as `ci-full`.
# `ci-quick` is now the primary name; `ci-full` is kept as an alias so any
# existing invocations of `make ci-full` keep working.
.PHONY: ci-quick ci-full
ci-quick: test-quick
ci-full: ci-quick

.PHONY: install
install:
	go install -ldflags="-s -w" -v ./cmd/*

================================================
FILE: README.md
================================================
# Olric [![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Olric%3A+Distributed+and+in-memory+key%2Fvalue+database.+It+can+be+used+both+as+an+embedded+Go+library+and+as+a+language-independent+service.+&url=https://github.com/olric-data/olric/&hashtags=golang,distributed,database)

[![Go Reference](https://pkg.go.dev/badge/github.com/olric-data/olric/.svg)](https://pkg.go.dev/github.com/olric-data/olric/) [![Go Report Card](https://goreportcard.com/badge/olric-data/olric)](https://goreportcard.com/report/github.com/olric-data/olric/) [![Discord](https://img.shields.io/discord/721708998021087273.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/ahK7Vjr8We) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)

Distributed In-Memory Cache & Key/Value Store

Olric provides a simple way to create a **fast, scalable, and shared pool of RAM** across a cluster of machines. 
It's a distributed, in-memory key/value store and cache, written entirely in Go and designed specifically for distributed environments.

**Flexible Deployment:**

* **Embedded Go Library:** Integrate Olric directly into your Go applications.
* **Standalone Service:** Run Olric as a language-independent service.

**Key Features:**

* **Effortless Scalability:** Designed to handle hundreds of members and thousands of clients. New nodes auto-discover the cluster and linearly increase capacity.
* **Automatic Distribution:** Provides partitioning (sharding) and data re-balancing out-of-the-box, requiring no external coordination services. Data and backups are automatically balanced when capacity is added.
* **Wide Client Support:** Uses the standard **Redis Serialization Protocol (RESP)**, ensuring client libraries are available in nearly all major programming languages.
* **Common Use Cases:** Ideal for distributed caching, managing application cluster state, and implementing publish-subscribe messaging.

See [Docker](#docker) and [Samples](#samples) sections to get started! 

Join our [Discord server!](https://discord.gg/ahK7Vjr8We)

The current production version is [v0.7.0](https://github.com/olric-data/olric/tree/release/v0.7)

### About renaming the module

`github.com/buraksezer/olric` module has been renamed to `github.com/olric-data/olric`. This change has been effective since **v0.6.0**.
Importing previous versions should redirect you to the new repository, but you should change the import paths in your codebase as soon as possible.

There is no other difference between v0.5.7 and v0.6.0.

## At a glance

* Designed to share some transient, approximate, fast-changing data between servers,
* Uses Redis serialization protocol,
* Implements a distributed hash table,
* Provides a drop-in replacement for Redis Publish/Subscribe messaging system,
* Supports both programmatic and declarative configuration, 
* Embeddable but can be used as a language-independent service with *olric-server*,
* Supports different eviction algorithms (including LRU and TTL),
* Highly available and horizontally scalable,
* Provides best-effort consistency guarantees without being a complete CP (indeed PA/EC) solution,
* Supports replication by default (with sync and async options),
* Quorum-based voting for replica control (Read/Write quorums),
* Supports atomic operations,
* Provides an iterator on distributed maps,
* Provides a plugin interface for service discovery daemons,
* Provides a locking primitive inspired by [SETNX of Redis](https://redis.io/commands/setnx#design-pattern-locking-with-codesetnxcode),

## Possible Use Cases

Olric is an eventually consistent, unordered key/value data store. It supports various eviction mechanisms for distributed caching implementations. Olric 
also provides publish-subscribe messaging, data replication, failure detection and simple anti-entropy services. 

It's good at distributed caching and publish/subscribe messaging.

## Table of Contents

* [Features](#features)
* [Support](#support)
* [Installing](#installing)
  * [Docker](#docker)
* [Getting Started](#getting-started)
  * [Operation Modes](#operation-modes)
    * [Embedded Member](#embedded-member)
    * [Client-Server](#client-server)
* [Golang Client](#golang-client)
* [Cluster Events](#cluster-events)
* [Authentication](#authentication)
* [Commands](#commands)
  * [Distributed Map](#distributed-map)
    * [DM.PUT](#dmput)
    * [DM.GET](#dmget)
    * [DM.DEL](#dmdel)
    * [DM.EXPIRE](#dmexpire)
    * [DM.PEXPIRE](#dmpexpire)
    * [DM.DESTROY](#dmdestroy)
    * [Atomic Operations](#atomic-operations)
      * [DM.INCR](#dmincr)
      * [DM.DECR](#dmdecr)
      * [DM.GETPUT](#dmgetput)
      * [DM.INCRBYFLOAT](#dmincrbyfloat)
    * [Locking](#locking)
      * [DM.LOCK](#dmlock)
      * [DM.UNLOCK](#dmunlock)
      * [DM.LOCKLEASE](#dmlocklease)
      * [DM.PLOCKLEASE](#dmplocklease)
    * [DM.SCAN](#dmscan)
  * [Publish-Subscribe](#publish-subscribe)
    * [SUBSCRIBE](#subscribe)
    * [PSUBSCRIBE](#psubscribe)
    * [UNSUBSCRIBE](#unsubscribe)
    * [PUNSUBSCRIBE](#punsubscribe)
    * [PUBSUB CHANNELS](#pubsub-channels)
    * [PUBSUB NUMPAT](#pubsub-numpat)
    * [PUBSUB NUMSUB](#pubsub-numsub)
    * [QUIT](#quit)
    * [PING](#ping)
  * [Cluster](#cluster)
    * [CLUSTER.ROUTINGTABLE](#clusterroutingtable)
    * [CLUSTER.MEMBERS](#clustermembers)
  * [Others](#others)
    * [PING](#ping)
    * [STATS](#stats)
    * [AUTH](#auth)
* [Configuration](#configuration)
    * [Embedded Member Mode](#embedded-member-mode)
      * [Manage the configuration in YAML format](#manage-the-configuration-in-yaml-format)
    * [Client-Server Mode](#client-server-mode)
    * [Network Configuration](#network-configuration)
    * [Service discovery](#service-discovery)
    * [Timeouts](#timeouts)
* [Architecture](#architecture)
  * [Overview](#overview)
  * [Consistency and Replication Model](#consistency-and-replication-model)
    * [Last-write-wins conflict resolution](#last-write-wins-conflict-resolution)
    * [PACELC Theorem](#pacelc-theorem)
    * [Read-Repair on DMaps](#read-repair-on-dmaps)
    * [Quorum-based Replica Control](#quorum-based-replica-control)
    * [Simple Split-Brain Protection](#simple-split-brain-protection)
  * [Eviction](#eviction)
    * [Expire with TTL](#expire-with-ttl)
    * [Expire with MaxIdleDuration](#expire-with-maxidleduration)
    * [Expire with LRU](#expire-with-lru)
  * [Lock Implementation](#lock-implementation)
  * [Storage Engine](#storage-engine)
* [Samples](#samples)
* [Contributions](#contributions)
* [License](#license)
* [About the name](#about-the-name)


## Features

* Designed to share some transient, approximate, fast-changing data between servers,
* Accepts arbitrary types as value,
* Only in-memory,
* Uses Redis protocol,
* Compatible with existing Redis clients,
* Embeddable but can be used as a language-independent service with olric-server,
* GC-friendly storage engine,
* O(1) running time for lookups,
* Supports atomic operations,
* Provides a lock implementation which can be used for non-critical purposes,
* Different eviction policies: LRU, MaxIdleDuration and Time-To-Live (TTL),
* Highly available,
* Horizontally scalable,
* Provides best-effort consistency guarantees without being a complete CP (indeed PA/EC) solution,
* Distributes load fairly among cluster members with a [consistent hash function](https://github.com/buraksezer/consistent),
* Supports replication by default (with sync and async options),
* Quorum-based voting for replica control,
* Thread-safe by default,
* Provides an iterator on distributed maps,
* Provides a plugin interface for service discovery daemons and cloud providers,
* Provides a locking primitive inspired by [SETNX of Redis](https://redis.io/commands/setnx#design-pattern-locking-with-codesetnxcode),
* Provides a drop-in replacement of Redis' Publish-Subscribe messaging feature.

See the [Architecture](#architecture) section to see details.

## Support

We have a few communication channels: 

* [Issue Tracker](https://github.com/olric-data/olric/issues)
* [Discord server](https://discord.gg/ahK7Vjr8We)

You should know that the issue tracker is only intended for bug reports and feature requests.

Software doesn't maintain itself. If you need support on complex topics or request new features, please consider [sponsoring Olric](https://github.com/sponsors/buraksezer).

## Installing

With a correctly configured Golang environment:

```
go install github.com/olric-data/olric/cmd/olric-server@v0.7.0
```

Now you can start using Olric:

```
olric-server -c cmd/olric-server/olric-server-local.yaml
```

See the [Configuration](#configuration) section to create your cluster properly.

### Docker

You can launch `olric-server` Docker container by running the following command. 

```bash
docker pull ghcr.io/olric-data/olric:latest
``` 

This command will pull the olric-server Docker image. When you run a container from this image, it exposes the
`3320` and `3322` ports. 

Now, you can access an Olric cluster using any Redis client including `redis-cli`:

```bash
redis-cli -p 3320
127.0.0.1:3320> DM.PUT my-dmap my-key "Olric Rocks!"
OK
127.0.0.1:3320> DM.GET my-dmap my-key
"Olric Rocks!"
127.0.0.1:3320>
```

## Getting Started

With olric-server, you can create an Olric cluster with a few commands. This is how to install olric-server:

```bash
go install github.com/olric-data/olric/cmd/olric-server@v0.7.0
```

Let's create a cluster with the following:

```
olric-server -c <YOUR_CONFIG_FILE_PATH>
```

You can find the sample configuration file under `cmd/olric-server/olric-server-local.yaml`. It can perfectly run with single node. 
olric-server also supports `OLRIC_SERVER_CONFIG` environment variable to set configuration. Just like that: 

```
OLRIC_SERVER_CONFIG=<YOUR_CONFIG_FILE_PATH> olric-server
```

Olric uses [hashicorp/memberlist](https://github.com/hashicorp/memberlist) for failure detection and cluster membership. 
Currently, there are different ways to discover peers in a cluster. You can use a static list of nodes in your configuration. 
It's ideal for development and test environments. Olric also supports Consul, Kubernetes and all well-known cloud providers
for service discovery. Please take a look at [Service Discovery](#service-discovery) section for further information.

See [Client-Server](#client-server) section to get more information about this deployment scenario.

#### Maintaining a list of peers manually

Basically, there is a list of nodes under `memberlist` block in the configuration file. In order to create an Olric cluster, 
you just need to add `Host:Port` pairs of the other nodes. Please note that the `Port` is the memberlist port of the peer.
It is `3322` by default. 

```yaml
memberlist:
  peers:
    - "localhost:3322"
```

Thanks to [hashicorp/memberlist](https://github.com/hashicorp/memberlist), Olric nodes can share the full list of members 
with each other. So an Olric node can discover the whole cluster by using a single member address.

#### Embedding into your Go application.

See [Samples](#samples) section to learn how to embed Olric into your existing Golang application.

### Operation Modes

Olric has two different operation modes.

#### Embedded Member

In Embedded Member Mode, members include both the application and Olric data and services. The advantage of the Embedded
Member Mode is having a low-latency data access and locality.

#### Client-Server

In Client-Server Mode, Olric data and services are centralized in one or more servers, and they are accessed by the 
application through clients. You can have a cluster of servers that can be independently created and scaled. Your clients 
communicate with these members to reach to Olric data and services on them.

Client-Server deployment has advantages including more predictable and reliable performance, easier identification
of problem causes and, most importantly, better scalability. When you need to scale in this deployment type, just add more
Olric server members. You can address client and server scalability concerns separately.

## Golang Client

The official Golang client is defined by the `Client` interface. There are two different implementations of that interface in 
this repository. `EmbeddedClient` provides a client implementation for [embedded-member](#embedded-member) scenario, 
`ClusterClient` provides an implementation of the same interface for [client-server](#client-server) deployment scenario. 
Obviously, you can use `ClusterClient` for your embedded-member deployments. But it's better to use `EmbeddedClient`, since it provides 
better performance due to localization of the queries.

See the client documentation on [pkg.go.dev](https://pkg.go.dev/github.com/olric-data/olric/@v0.7.0)

## Cluster Events

Olric can push cluster events to the `cluster.events` channel. Available cluster events:

* node-join-event
* node-left-event
* fragment-migration-event
* fragment-received-event

If you want to receive these events, set `true` to `EnableClusterEventsChannel` and subscribe to `cluster.events` channel. 
The default is `false`.

See the [events/cluster_events.go](events/cluster_events.go) file to get more information about events.

## Authentication

Olric supports simple password-based authentication to restrict access to the data store. This mechanism is similar to the 
`requirepass` directive in Redis and is intended to provide a basic level of protection in trusted environments (e.g., 
internal networks or local development).

> **Important**: This authentication method **does not provide transport-layer encryption or full access control**. For secure
> deployments over untrusted networks (e.g., Internet), it's strongly recommended to place Olric behind a reverse proxy with TLS 
> support or use a secure network overlay (e.g., WireGuard, VPN).

### YAML-based Configuration

You can enable password-based authentication by adding the `authentication` block to your configuration file:

```yaml
authentication:
  password: "your-password"
```

When this is set, all clients must authenticate using the provided password before performing any operations.

### Programmatic Configuration (Go API)

For applications embedding Olric or configuring it dynamically in Go, you can enable authentication as follows:

```go
c := config.New("local")
c.Authentication = &config.Authentication{
    Password: "your-password",
}
```

This sets the password required for any client to interact with the Olric node.

### Client-Side Usage

Clients must send the password using the [AUTH](#auth) command. If the password is incorrect or not provided, the connection will 
be denied or commands will be rejected.

With the cluster client, you can use `WithPassword` cluster client option.

```go
client, err := NewClusterClient([]string{db.name}, WithPassword("test-password"))
```

**Important:** The embedded client has not been covered by the authentication implementation.

## Commands

Olric uses Redis protocol and supports Redis-style commands to query the database. You can use any Redis client, including
`redis-cli`. The official Go client is a thin layer around [go-redis/redis](https://github.com/go-redis/redis) package. 
See [Golang Client](#golang-client) section for the documentation.

### Distributed Map

#### DM.PUT 

DM.PUT sets the value for the given key. It overwrites any previous value for that key.

```
DM.PUT dmap key value [ EX seconds | PX milliseconds | EXAT unix-time-seconds | PXAT unix-time-milliseconds ] [ NX | XX]
```

**Example:**
```
127.0.0.1:3320> DM.PUT my-dmap my-key value
OK
```

**Options:**

The DM.PUT command supports a set of options that modify its behavior:

* **EX** *seconds* -- Set the specified expire time, in seconds.
* **PX** *milliseconds* -- Set the specified expire time, in milliseconds.
* **EXAT** *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds.
* **PXAT** *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds.
* **NX** -- Only set the key if it does not already exist.
* **XX** -- Only set the key if it already exists.

**Return:**

* **Simple string reply:** OK if DM.PUT was executed correctly.
* **KEYFOUND:** (error) if the DM.PUT operation was not performed because the user specified the NX option but the condition was not met.
* **KEYNOTFOUND:** (error) if the DM.PUT operation was not performed because the user specified the XX option but the condition was not met.

#### DM.GET

DM.GET gets the value for the given key. It returns (error)`KEYNOTFOUND` if the key doesn't exist. 

```
DM.GET dmap key
```

**Example:**

```
127.0.0.1:3320> DM.GET dmap key
"value"
```

**Return:**

**Bulk string reply**: the value of key, or (error)`KEYNOTFOUND` when key does not exist.

#### DM.DEL

DM.DEL deletes values for the given keys. It doesn't return any error if the key does not exist.

```
DM.DEL dmap key [key...]
```

**Example:**

```
127.0.0.1:3320> DM.DEL dmap key1 key2
(integer) 2
```

**Return:**

* **Integer reply**: The number of keys that were removed.

#### DM.EXPIRE

DM.EXPIRE updates or sets the timeout for the given key. It returns `KEYNOTFOUND` if the key doesn't exist. After the timeout has expired, 
the key will automatically be deleted. 

The timeout will only be cleared by commands that delete or overwrite the contents of the key, including DM.DEL, DM.PUT, DM.GETPUT.

```
DM.EXPIRE dmap key seconds
```

**Example:**

```
127.0.0.1:3320> DM.EXPIRE dmap key 1
OK
```

**Return:**

* **Simple string reply:** OK if DM.EXPIRE was executed correctly.
* **KEYNOTFOUND:** (error) when key does not exist.

#### DM.PEXPIRE

DM.PEXPIRE updates or sets the timeout for the given key. It returns `KEYNOTFOUND` if the key doesn't exist. After the timeout has expired,
the key will automatically be deleted.

The timeout will only be cleared by commands that delete or overwrite the contents of the key, including DM.DEL, DM.PUT, DM.GETPUT.

```
DM.PEXPIRE dmap key milliseconds
```

**Example:**

```
127.0.0.1:3320> DM.PEXPIRE dmap key 1000
OK
```

**Return:**

* **Simple string reply:** OK if DM.PEXPIRE was executed correctly.
* **KEYNOTFOUND:** (error) when key does not exist.

#### DM.DESTROY

DM.DESTROY flushes the given DMap on the cluster. You should know that there is no global lock on DMaps. DM.PUT and DM.DESTROY commands
may run concurrently on the same DMap. 

```
DM.DESTROY dmap
```

**Example:**

```
127.0.0.1:3320> DM.DESTROY dmap
OK
```

**Return:**

* **Simple string reply:** OK, if DM.DESTROY was executed correctly.

### Atomic Operations

Operations on key/value pairs are performed by the partition owner. In addition, atomic operations are guarded by a lock implementation which can be found under `internal/locker`. It means that
Olric guarantees consistency of atomic operations, if there is no network partition. Basic flow for `DM.INCR`:

* Acquire the lock for the given key,
* Call `DM.GET` to retrieve the current value,
* Calculate the new value,
* Call `DM.PUT` to set the new value,
* Release the lock.

It's important to know that if you call `DM.PUT` and `DM.GETPUT` concurrently on the same key, this will break the atomicity.

`internal/locker` package is provided by [Docker](https://github.com/moby/moby).

**Important note about consistency:**

You should know that Olric is a PA/EC (see [Consistency and Replication Model](#consistency-and-replication-model)) product. So if your network is stable, all the operations on key/value
pairs are performed by a single cluster member. It means that you can be sure about the consistency when the cluster is stable. It's important to know that computer networks fail
occasionally, processes crash and random GC pauses may happen. Many factors can lead a network partitioning. If you cannot tolerate losing strong consistency under network partitioning,
you need to use a different tool for atomic operations.

See [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html) and [Jepsen Analysis on Hazelcast 3.8.3](https://hazelcast.com/blog/jepsen-analysis-hazelcast-3-8-3/) for more insight on this topic.


#### DM.INCR

DM.INCR atomically increments the number stored at key by delta. The return value is the new value after being incremented or an error.

```
DM.INCR dmap key delta
```

**Example:**

```
127.0.0.1:3320> DM.INCR dmap key 10
(integer) 10
```

**Return:**

* **Integer reply:** the value of key after the increment.

#### DM.DECR

DM.DECR atomically decrements the number stored at key by delta. The return value is the new value after being decremented or an error.

```
DM.DECR dmap key delta
```

**Example:**

```
127.0.0.1:3320> DM.DECR dmap key 10
(integer) 0
```

**Return:**

* **Integer reply:** the value of key after the decrement.

#### DM.GETPUT

DM.GETPUT atomically sets key to value and returns the old value stored at the key.

```
DM.GETPUT dmap key value
```

**Example:**

```
127.0.0.1:3320> DM.GETPUT dmap key value-1
(nil)
127.0.0.1:3320> DM.GETPUT dmap key value-2
"value-1"
```

**Return:**

* **Bulk string reply**: the old value stored at the key.

#### DM.INCRBYFLOAT

DM.INCRBYFLOAT atomically increments the number stored at key by delta. The return value is the new value after being incremented or an error.

```
DM.INCRBYFLOAT dmap key delta
```

**Example:**

```
127.0.0.1:3320> DM.PUT dmap key 10.50
OK
127.0.0.1:3320> DM.INCRBYFLOAT dmap key 0.1
"10.6"
127.0.0.1:3320> DM.PUT dmap key 5.0e3
OK
127.0.0.1:3320> DM.INCRBYFLOAT dmap key 2.0e2
"5200"
```

**Return:**

* **Bulk string reply**: the value of key after the increment.


### Locking

**Important:** The lock provided by DMap implementation is approximate and only to be used for non-critical purposes.

The DMap implementation is already thread-safe to meet your thread safety requirements. When you want to have more control on the
concurrency, you can use **DM.LOCK** command. Olric borrows the locking algorithm from Redis. Redis authors propose
the following algorithm:

> The command <SET resource-name anystring NX EX max-lock-time> is a simple way to implement a locking system with Redis.
>
> A client can acquire the lock if the above command returns OK (or retry after some time if the command returns Nil), and remove the lock just using DEL.
>
> The lock will be auto-released after the expire time is reached.
>
> It is possible to make this system more robust modifying the unlock schema as follows:
>
> Instead of setting a fixed string, set a non-guessable large random string, called token.
> Instead of releasing the lock with DEL, send a script that only removes the key if the value matches.
> This avoids that a client will try to release the lock after the expire time deleting the key created by another client that acquired the lock later.

The equivalent of the `SETNX` command in Olric is `DM.PUT dmap key value NX`. The DM.LOCK command properly implements
the algorithm proposed above.

You should know that this implementation is subject to the clustering algorithm. So there is no guarantee about reliability in the case of network partitioning. I recommend the lock implementation to be used for
efficiency purposes in general, instead of correctness.

**Important note about consistency:**

You should know that Olric is a PA/EC (see [Consistency and Replication Model](#consistency-and-replication-model)) product. So if your network is stable, all the operations on key/value
pairs are performed by a single cluster member. It means that you can be sure about the consistency when the cluster is stable. It's important to know that computer networks fail
occasionally, processes crash and random GC pauses may happen. Many factors can lead a network partitioning. If you cannot tolerate losing strong consistency under network partitioning,
you need to use a different tool for locking.

See [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html) and [Jepsen Analysis on Hazelcast 3.8.3](https://hazelcast.com/blog/jepsen-analysis-hazelcast-3-8-3/) for more insight on this topic.

#### DM.LOCK

DM.LOCK sets a lock for the given key. The acquired lock is only valid for the key in this DMap.
It returns immediately if it acquires the lock for the given key. Otherwise, it waits until deadline.

DM.LOCK returns a token. You must keep that token to unlock the key. Using prefixed keys is highly recommended.
If the key does already exist in the DMap, DM.LOCK will wait until the deadline is exceeded.

```
DM.LOCK dmap key seconds [ EX seconds | PX milliseconds ]
```

**Options:**

* **EX** *seconds* -- Set the specified expire time, in seconds.
* **PX** *milliseconds* -- Set the specified expire time, in milliseconds.

**Example:**

```
127.0.0.1:3320> DM.LOCK dmap lock.key 10
2363ec600be286cb10fbb35181efb029
```

**Return:**

* **Simple string reply:** a token to unlock or lease the lock.
* **NOSUCHLOCK**: (error) returned when the requested lock does not exist.
* **LOCKNOTACQUIRED**: (error) returned when the requested lock could not be acquired.

#### DM.UNLOCK

DM.UNLOCK releases an acquired lock for the given key. It returns `NOSUCHLOCK` if there is no lock for the given key.

```
DM.UNLOCK dmap key token
```

**Example:**

```
127.0.0.1:3320> DM.UNLOCK dmap key 2363ec600be286cb10fbb35181efb029
OK
```

**Return:**

* **Simple string reply:** OK if DM.UNLOCK was executed correctly.
* **NOSUCHLOCK**: (error) returned when the lock does not exist.

#### DM.LOCKLEASE

DM.LOCKLEASE sets or updates the timeout of the acquired lock for the given key. It returns `NOSUCHLOCK` if there is no lock for the given key.

DM.LOCKLEASE accepts seconds as timeout.

```
DM.LOCKLEASE dmap key token seconds
```

**Example:**

```
127.0.0.1:3320> DM.LOCKLEASE dmap key 2363ec600be286cb10fbb35181efb029 100
OK
```

**Return:**

* **Simple string reply:** OK if DM.LOCKLEASE was executed correctly.
* **NOSUCHLOCK**: (error) returned when the lock does not exist.

#### DM.PLOCKLEASE

DM.PLOCKLEASE sets or updates the timeout of the acquired lock for the given key. It returns `NOSUCHLOCK` if there is no lock for the given key.

DM.PLOCKLEASE accepts milliseconds as timeout.

```
DM.PLOCKLEASE dmap key token milliseconds
```

**Example:**

```
127.0.0.1:3320> DM.PLOCKLEASE dmap key 2363ec600be286cb10fbb35181efb029 1000
OK
```

**Return:**

* **Simple string reply:** OK if DM.PLOCKLEASE was executed correctly.
* **NOSUCHLOCK**: (error) returned when the lock does not exist.

#### DM.SCAN

DM.SCAN is a cursor based iterator. This means that at every call of the command, the server returns an updated cursor 
that the user needs to use as the cursor argument in the next call.

An iteration starts when the cursor is set to 0, and terminates when the cursor returned by the server is 0. The iterator runs
locally on every partition. So you need to know the partition count. If the returned cursor is 0 for a particular partition,
you have to start scanning the next partition. 

```
DM.SCAN partID dmap cursor [ MATCH pattern | COUNT count ]
```

**Example:**

```
127.0.0.1:3320> DM.SCAN 3 bench 0
1) "96990"
2)  1) "memtier-2794837"
    2) "memtier-8630933"
    3) "memtier-6415429"
    4) "memtier-7808686"
    5) "memtier-3347072"
    6) "memtier-4247791"
    7) "memtier-3931982"
    8) "memtier-7164719"
    9) "memtier-4710441"
   10) "memtier-8892916"
127.0.0.1:3320> DM.SCAN 3 bench 96990
1) "193499"
2)  1) "memtier-429905"
    2) "memtier-1271812"
    3) "memtier-7835776"
    4) "memtier-2717575"
    5) "memtier-95312"
    6) "memtier-2155214"
    7) "memtier-123931"
    8) "memtier-2902510"
    9) "memtier-2632291"
   10) "memtier-1938450"
```
### Publish-Subscribe

**SUBSCRIBE**, **UNSUBSCRIBE** and **PUBLISH** implement the Publish/Subscribe messaging paradigm where 
senders are not programmed to send their messages to specific receivers. Rather, published messages are characterized 
into channels, without knowledge of what (if any) subscribers there may be. Subscribers express interest in one or more 
channels, and only receive messages that are of interest, without knowledge of what (if any) publishers there are. 
This decoupling of publishers and subscribers can allow for greater scalability and a more dynamic network topology.

**Important note:** In an Olric cluster, clients can subscribe to every node, and can also publish to every other node. The cluster
will make sure that published messages are forwarded as needed.

*Source of this section: [https://redis.io/commands/?group=pubsub](https://redis.io/commands/?group=pubsub)*

#### SUBSCRIBE

Subscribes the client to the specified channels.

```
SUBSCRIBE channel [channel...]
```

Once the client enters the subscribed state it is not supposed to issue any other commands, except for additional **SUBSCRIBE**, 
**PSUBSCRIBE**, **UNSUBSCRIBE**, **PUNSUBSCRIBE**, **PING**, and **QUIT** commands.

#### PSUBSCRIBE

Subscribes the client to the given patterns.

```
PSUBSCRIBE pattern [ pattern ...]
```

Supported glob-style patterns:

* `h?llo` subscribes to hello, hallo and hxllo
* `h*llo` subscribes to hllo and heeeello
* `h[ae]llo` subscribes to hello and hallo, but not hillo
* Use **\\** to escape special characters if you want to match them verbatim.

#### UNSUBSCRIBE

Unsubscribes the client from the given channels, or from all of them if none is given.

```
UNSUBSCRIBE [channel [channel ...]]
```

When no channels are specified, the client is unsubscribed from all the previously subscribed channels. In this case, 
a message for every unsubscribed channel will be sent to the client.

#### PUNSUBSCRIBE

Unsubscribes the client from the given patterns, or from all of them if none is given.

```
PUNSUBSCRIBE [pattern [pattern ...]]
```

When no patterns are specified, the client is unsubscribed from all the previously subscribed patterns. In this case, 
a message for every unsubscribed pattern will be sent to the client.

#### PUBSUB CHANNELS

Lists the currently active channels.

```
PUBSUB CHANNELS [pattern]
```

An active channel is a Pub/Sub channel with one or more subscribers (excluding clients subscribed to patterns).

If no pattern is specified, all the channels are listed, otherwise if pattern is specified only channels matching the 
specified glob-style pattern are listed.

#### PUBSUB NUMPAT

Returns the number of unique patterns that are subscribed to by clients (that are performed using the PSUBSCRIBE command).

```
PUBSUB NUMPAT
```

Note that this isn't the count of clients subscribed to patterns, but the total number of unique patterns all the clients are subscribed to.

**Important note**: In an Olric cluster, clients can subscribe to every node, and can also publish to every other node. The cluster 
will make sure that published messages are forwarded as needed. That said, PUBSUB's replies in a cluster only report information 
from the node's Pub/Sub context, rather than the entire cluster.

#### PUBSUB NUMSUB

Returns the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels.

```
PUBSUB NUMSUB [channel [channel ...]]
```
Note that it is valid to call this command without channels. In this case it will just return an empty list.

**Important note**: In an Olric cluster, clients can subscribe to every node, and can also publish to every other node. The cluster 
will make sure that published messages are forwarded as needed. That said, PUBSUB's replies in a cluster only report information 
from the node's Pub/Sub context, rather than the entire cluster.

#### QUIT

Ask the server to close the connection. The connection is closed as soon as all pending replies have been written to the client.

```
QUIT
```
### Cluster

#### CLUSTER.ROUTINGTABLE

CLUSTER.ROUTINGTABLE returns the latest view of the routing table. Simply, it's a data structure that maps
partitions to members.

```
CLUSTER.ROUTINGTABLE
```

**Example:**

```
127.0.0.1:3320> CLUSTER.ROUTINGTABLE
 1) 1) (integer) 0
     2) 1) "127.0.0.1:3320"
     3) (empty array)
  2) 1) (integer) 1
     2) 1) "127.0.0.1:3320"
     3) (empty array)
  3) 1) (integer) 2
     2) 1) "127.0.0.1:3320"
     3) (empty array)
```

It returns an array of arrays. 

**Fields:**

```
1) (integer) 0 <- Partition ID
  2) 1) "127.0.0.1:3320" <- Array of the current and previous primary owners
  3) (empty array) <- Array of backup owners. 
```

#### CLUSTER.MEMBERS

CLUSTER.MEMBERS returns an array of known members by the server.

```
CLUSTER.MEMBERS
```

**Example:**

```
127.0.0.1:3320> CLUSTER.MEMBERS
1) 1) "127.0.0.1:3320"
   2) (integer) 1652619388427137000
   3) "true"
```

**Fields:**

```
1) 1) "127.0.0.1:3320" <- Member's name in the cluster
   2) (integer) 1652619388427137000 <- Member's birthdate
   3) "true" <- Is cluster coordinator (the oldest node)
```

### Others

#### PING

Returns PONG if no argument is provided, otherwise return a copy of the argument as a bulk. This command is often used to
test if a connection is still alive, or to measure latency.

```
PING
```

#### STATS

The STATS command returns information and statistics about the server in JSON format. See `stats/stats.go` file.

```
127.0.0.1:3320> STATS
<a large string in JSON format>
```

#### AUTH

`AUTH` authenticates the client using the given password:

```
127.0.0.1:3320> AUTH your-password
OK
```

Unauthenticated clients get `NOAUTH` error:

```
127.0.0.1:3320> DMAP.PUT dmap key value
(error) NOAUTH Authentication required.
```

If you try to authenticate the client but the server is not configured, Olric returns the following error:

```
127.0.0.1:3320> AUTH your-password
(error) ERR AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?
```

## Configuration

Olric supports both declarative and programmatic configurations. You can choose one of them depending on your needs.
You should feel free to ask any questions about configuration and integration. Please see [Support](#support) section.

### Embedded-Member Mode

#### Programmatic Configuration
Olric provides a function to generate default configuration to use in embedded-member mode:

```go
import "github.com/olric-data/olric/config"
...
c := config.New("local")
```

The `New` function takes a parameter called `env`. It denotes the network environment and consumed by [hashicorp/memberlist](https://github.com/hashicorp/memberlist). 
Default configuration is good enough for distributed caching scenario. In order to see all configuration parameters, please take a look at [this](https://godoc.org/github.com/olric-data/olric/config).

See [Sample Code](#sample-code) section for an introduction.

#### Declarative configuration with YAML format

You can also import configuration from a YAML file by using the `Load` function:

```go
c, err := config.Load(path/to/olric.yaml)
```

A sample configuration file in YAML format can be found [here](https://github.com/olric-data/olric/blob/master/cmd/olric-server/olric-server.yaml). This may be the most appropriate way to manage the Olric configuration.


### Client-Server Mode

Olric provides **olric-server** to implement client-server mode. olric-server gets a YAML file for the configuration. The most basic functionality of olric-server is 
translating the YAML configuration into Olric's configuration struct. A sample `olric-server.yaml` file is provided [here](https://github.com/olric-data/olric/blob/master/cmd/olric-server/olric-server.yaml).

### Network Configuration

In an Olric instance, there are two different TCP servers. One for Olric, and the other one is for memberlist. `BindAddr` is very
critical to deploy a healthy Olric node. There are different scenarios:

* You can freely set a domain name or IP address as `BindAddr` for both Olric and memberlist. Olric will resolve and use it to bind.
* You can freely set `localhost`, `127.0.0.1` or `::1` as `BindAddr` in development environment for both Olric and memberlist.
* You can freely set `0.0.0.0` as `BindAddr` for both Olric and memberlist. Olric will pick an IP address, if there is any.
* If you don't set `BindAddr`, hostname will be used, and it will be resolved to get a valid IP address.
* You can set a network interface by using `Config.Interface` and `Config.MemberlistInterface` fields. Olric will find an appropriate IP address for the given interfaces, if there is any.
* You can set both `BindAddr` and interface parameters. In this case Olric will ensure that `BindAddr` is available on the given interface.

You should know that Olric needs a single and stable IP address to function properly. If you don't know the IP address of the host at the deployment time, 
you can set `BindAddr` as `0.0.0.0`. Olric is very likely to find an IP address for you.

### Service Discovery

Olric provides a service discovery interface which can be used to implement plugins. 

We currently have a bunch of service discovery plugins for automatic peer discovery on cloud environments:

* [olric-data/olric-consul-plugin](https://github.com/olric-data/olric-consul-plugin) provides a plugin using Consul.
* [olric-data/olric-cloud-plugin](https://github.com/olric-data/olric-cloud-plugin) provides a plugin for well-known cloud providers. Including Kubernetes.
* [justinfx/olric-nats-plugin](https://github.com/justinfx/olric-nats-plugin) provides a plugin using nats.io

In order to get more info about installation and configuration of the plugins, see their GitHub page. 

### Timeouts

Olric nodes support setting `KeepAlivePeriod` on TCP sockets. 

**Server-side:**

##### config.KeepAlivePeriod 

KeepAlivePeriod denotes whether the operating system should send keep-alive messages on the connection.

**Client-side:**
 
##### config.DialTimeout

Timeout for TCP dial. The timeout includes name resolution, if required. When using TCP, and the host in the address 
parameter resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is
given an appropriate fraction of the time to connect.

##### config.ReadTimeout

Timeout for socket reads. If reached, commands will fail with a timeout instead of blocking. Use value -1 for no 
timeout and 0 for default. The default is config.DefaultReadTimeout

##### config.WriteTimeout

Timeout for socket writes. If reached, commands will fail with a timeout instead of blocking. The default is config.DefaultWriteTimeout

## Architecture

### Overview

Olric uses:
* [hashicorp/memberlist](https://github.com/hashicorp/memberlist) for cluster membership and failure detection,
* [buraksezer/consistent](https://github.com/buraksezer/consistent) for consistent hashing and load balancing,
* [Redis Serialization Protocol](https://github.com/tidwall/redcon) for communication.

Olric distributes data among partitions. Every partition is being owned by a cluster member and may have one or more backups for redundancy. 
When you read or write a DMap entry, you transparently talk to the partition owner. Each request hits the most up-to-date version of a
particular data entry in a stable cluster.

In order to find the partition which the key belongs to, Olric hashes the key and mod it with the number of partitions:

```
partID = MOD(hash result, partition count)
```

The partitions are being distributed among cluster members by using a consistent hashing algorithm. In order to get details, please see [buraksezer/consistent](https://github.com/buraksezer/consistent). 

When a new cluster is created, one of the instances is elected as the **cluster coordinator**. It manages the partition table: 

* When a node joins or leaves, it distributes the partitions and their backups among the members again,
* Removes empty previous owners from the partition owners list,
* Pushes the new partition table to all the members,
* Pushes the partition table to the cluster periodically.

Members propagate their birthdate(POSIX time in nanoseconds) to the cluster. The coordinator is the oldest member in the cluster.
If the coordinator leaves the cluster, the second oldest member gets elected as the coordinator.

Olric has a component called **rebalancer** which is responsible for keeping underlying data structures consistent:

* Works on every node,
* When a node joins or leaves, the cluster coordinator pushes the new partition table. Then, the **rebalancer** runs immediately and moves the partitions and backups to their new hosts,
* Merges fragmented partitions.

Partitions have a concept called **owners list**. When a node joins or leaves the cluster, a new primary owner may be assigned by the 
coordinator. At any time, a partition may have one or more partition owners. If a partition has two or more owners, this is called **fragmented partition**. 
The last added owner is called **primary owner**. Write operation is only done by the primary owner. The previous owners are only used for read and delete.

When you read a key, the primary owner tries to find the key on itself, first. Then, queries the previous owners and backups, respectively.
The delete operation works the same way.

The data(distributed map objects) in the fragmented partition is moved slowly to the primary owner by the **rebalancer**. Until the move is done,
the data remains available on the previous owners. The DMap methods use this list to query data on the cluster.

*Please note that, 'multiple partition owners' is an undesirable situation and the **rebalancer** component is designed to fix that in a short time.*

### Consistency and Replication Model

**Olric is an AP product** in the context of [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem), which employs the combination of primary-copy 
and [optimistic replication](https://en.wikipedia.org/wiki/Optimistic_replication) techniques. With optimistic replication, when the partition owner 
receives a write or delete operation for a key, applies it locally, and propagates it to the backup owners.

This technique enables Olric clusters to offer high throughput. However, due to temporary situations in the system, such as network
failure, backup owners can miss some updates and diverge from the primary owner. If a partition owner crashes while there is an
inconsistency between itself and the backups, strong consistency of the data can be lost.

Two types of backup replication are available: **sync** and **async**. Both types are still implementations of the optimistic replication
model.

* **sync**: Blocks until write/delete operation is applied by backup owners.
* **async**: Just fire & forget.

#### Last-write-wins conflict resolution

Every time a piece of data is written to Olric, a timestamp is attached by the client. Then, when Olric has to deal with conflict data in the case 
of network partitioning, it simply chooses the data with the most recent timestamp. This is called the LWW (last-write-wins) conflict resolution policy.

#### PACELC Theorem

From Wikipedia:

> In theoretical computer science, the [PACELC theorem](https://en.wikipedia.org/wiki/PACELC_theorem) is an extension to the [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem). It states that in case of network partitioning (P) in a 
> distributed computer system, one has to choose between availability (A) and consistency (C) (as per the CAP theorem), but else (E), even when the system is 
> running normally in the absence of partitions, one has to choose between latency (L) and consistency (C).

In the context of PACELC theorem, Olric is a **PA/EC** product. It means that Olric is considered to be **consistent** data store if the network is stable. 
Because the key space is divided between partitions and every partition is controlled by its primary owner. All operations on DMaps are redirected to the 
partition owner. 

In the case of network partitioning, Olric chooses **availability** over consistency. So that you can still access some parts of the cluster when the network is unreliable, 
but the cluster may return inconsistent results.  

Olric implements read-repair and quorum based voting system to deal with inconsistencies in the DMaps. 

Readings on PACELC theorem:
* [Please stop calling databases CP or AP](https://martin.kleppmann.com/2015/05/11/please-stop-calling-databases-cp-or-ap.html)
* [Problems with CAP, and Yahoo’s little known NoSQL system](https://dbmsmusings.blogspot.com/2010/04/problems-with-cap-and-yahoos-little.html)
* [A Critique of the CAP Theorem](https://arxiv.org/abs/1509.05393)
* [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html)

#### Read-Repair on DMaps

Read repair is a feature that allows for inconsistent data to be fixed at query time. Olric tracks every write operation with a timestamp value and assumes 
that the latest write operation is the valid one. When you want to access a key/value pair, the partition owner retrieves all available copies for that pair
and compares the timestamp values. The latest one is the winner. If there is some outdated version of the requested pair, the primary owner propagates the latest
version of the pair. 

Read-repair is disabled by default for the sake of performance. If you have a use case that requires a more strict consistency control than a distributed caching 
scenario, you can enable read-repair via the configuration. 

#### Quorum-based replica control

Olric implements Read/Write quorum to keep the data in a consistent state. When you start a write operation on the cluster and write quorum (W) is 2, 
the partition owner tries to write the given key/value pair on its own data storage and on the replica nodes. If the number of successful write operations 
is below W, the primary owner returns `ErrWriteQuorum`. The read flow is the same: if you have R=2 and the owner only access one of the replicas, 
it returns `ErrReadQuorum`.

#### Simple Split-Brain Protection

Olric implements a technique called *majority quorum* to manage split-brain conditions. If a network partitioning occurs, and some members
lose the connection to the rest of the cluster, they immediately stop functioning and return an error to incoming requests. This behaviour is controlled by
the `MemberCountQuorum` parameter. Its default is `1`. 

When the network is healed, the stopped nodes join the cluster again, and fragmented partitions are merged by their primary owners in accordance with 
the *LWW policy*. Olric also implements an *ownership report* mechanism to fix inconsistencies in partition distribution after a partitioning event. 

### Eviction
Olric supports different policies to evict keys from distributed maps. 

#### Expire with TTL
Olric implements TTL eviction policy. It shares the same algorithm with [Redis](https://redis.io/commands/expire#appendix-redis-expires):

> Periodically Redis tests a few keys at random among keys with an expire set. All the keys that are already expired are deleted from the keyspace.
>
> Specifically this is what Redis does 10 times per second:
>
> * Test 20 random keys from the set of keys with an associated expire.
> * Delete all the keys found expired.
> * If more than 25% of keys were expired, start again from step 1.
>
> This is a trivial probabilistic algorithm, basically the assumption is that our sample is representative of the whole key space, and we continue to expire until the percentage of keys that are likely to be expired is under 25%

When a client tries to access a key, Olric returns `ErrKeyNotFound` if the key is found to be timed out. A background task evicts keys with the algorithm described above.

#### Expire with MaxIdleDuration

Maximum time for each entry to stay idle in the DMap. It limits the lifetime of the entries relative to the time of the last read 
or write access performed on them. The entries whose idle period exceeds this limit are expired and evicted automatically. 
An entry is idle if no Get, Put, PutEx, Expire, PutIf, or PutIfEx operation is performed on it. Configuration of the MaxIdleDuration feature varies by 
the preferred deployment method. 

#### Expire with LRU

Olric implements the LRU eviction method on DMaps. The approximated LRU algorithm is borrowed from Redis. The Redis authors propose the following algorithm:

> It is important to understand that the eviction process works like this:
> 
> * A client runs a new command, resulting in more data added.
> * Redis checks the memory usage, and if it is greater than the maxmemory limit , it evicts keys according to the policy.
> * A new command is executed, and so forth.
>
> So we continuously cross the boundaries of the memory limit, by going over it, and then by evicting keys to return back under the limits.
>
> If a command results in a lot of memory being used (like a big set intersection stored into a new key) for some time the memory 
> limit can be surpassed by a noticeable amount. 
>
> **Approximated LRU algorithm**
>
> Redis LRU algorithm is not an exact implementation. This means that Redis is not able to pick the best candidate for eviction, 
> that is, the access that was accessed the most in the past. Instead it will try to run an approximation of the LRU algorithm, 
> by sampling a small number of keys, and evicting the one that is the best (with the oldest access time) among the sampled keys.

Olric tracks access time for every DMap instance. Then it picks and sorts some configurable amount of keys to select keys for eviction.
Every node runs this algorithm independently. The access log is moved along with the partition when a network partition occurs.

#### Configuration of eviction mechanisms

Here is a simple configuration block for `olric-server.yaml`: 

```
cache:
  numEvictionWorkers: 1
  maxIdleDuration: ""
  ttlDuration: "100s"
  maxKeys: 100000
  maxInuse: 1000000 # in bytes
  lRUSamples: 10
  evictionPolicy: "LRU" # NONE/LRU
```

You can also set cache configuration per DMap. Here is a simple configuration for a DMap named `mydmap`:

```
dmaps:
  mydmap:
    maxIdleDuration: "60s"
    ttlDuration: "300s"
    maxKeys: 500000 # in-bytes
    lRUSamples: 20
    evictionPolicy: "NONE" # NONE/LRU
```

If you prefer embedded-member deployment scenario, please take a look at [config#CacheConfig](https://godoc.org/github.com/olric-data/olric/config#CacheConfig) and [config#DMapCacheConfig](https://godoc.org/github.com/olric-data/olric/config#DMapCacheConfig) for the configuration.


### Lock Implementation

The DMap implementation is already thread-safe to meet your thread safety requirements. When you want to have more control on the
concurrency, you can use **LockWithTimeout** and **Lock** methods. Olric borrows the locking algorithm from Redis. Redis authors propose
the following algorithm:

> The command <SET resource-name anystring NX EX max-lock-time> is a simple way to implement a locking system with Redis.
>
> A client can acquire the lock if the above command returns OK (or retry after some time if the command returns Nil), and remove the lock just using DEL.
>
> The lock will be auto-released after the expire time is reached.
>
> It is possible to make this system more robust modifying the unlock schema as follows:
>
> Instead of setting a fixed string, set a non-guessable large random string, called token.
> Instead of releasing the lock with DEL, send a script that only removes the key if the value matches.
> This avoids that a client will try to release the lock after the expire time deleting the key created by another client that acquired the lock later.

The equivalent of the `SETNX` command in Olric is `PutIf(key, value, IfNotFound)`. The Lock and LockWithTimeout commands properly implement
the algorithm proposed above. 

You should know that this implementation is subject to the clustering algorithm. So there is no guarantee about reliability in the case of network partitioning. I recommend the lock implementation to be used for 
efficiency purposes in general, instead of correctness.

**Important note about consistency:**

You should know that Olric is a PA/EC (see [Consistency and Replication Model](#consistency-and-replication-model)) product. So if your network is stable, all the operations on key/value 
pairs are performed by a single cluster member. It means that you can be sure about the consistency when the cluster is stable. It's important to know that computer networks fail 
occasionally, processes crash and random GC pauses may happen. Many factors can lead a network partitioning. If you cannot tolerate losing strong consistency under network partitioning, 
you need to use a different tool for locking.

See [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html) and [Jepsen Analysis on Hazelcast 3.8.3](https://hazelcast.com/blog/jepsen-analysis-hazelcast-3-8-3/) for more insight on this topic.
             
### Storage Engine

Olric implements a GC-friendly storage engine to store large amounts of data on RAM. Basically, it applies an append-only log file approach with indexes. 
Olric inserts key/value pairs into pre-allocated byte slices (table in Olric terminology) and indexes that memory region by using Golang's built-in map. 
The data type of this map is `map[uint64]uint64`. When a pre-allocated byte slice is full Olric allocates a new one and continues inserting the new data into it. 
This design greatly reduces the write latency.

When you want to read a key/value pair from the Olric cluster, it scans the related DMap fragment by iterating over the indexes(implemented by the built-in map). 
The number of allocated byte slices should be small. So Olric would find the key immediately but technically, the read performance depends on the number of keys in the fragment. 
The effect of this design on the read performance is negligible.

The size of the pre-allocated byte slices is configurable.

## Samples

In this section, you can find code snippets for various scenarios.

### Embedded-member scenario
#### Distributed map
```go
package main

import (
  "context"
  "fmt"
  "log"
  "time"

  "github.com/olric-data/olric"
  "github.com/olric-data/olric/config"
)

func main() {
  // Sample for Olric v0.7.x

  // Deployment scenario: embedded-member
  // This creates a single-node Olric cluster. It's good enough for experimenting.

  // config.New returns a new config.Config with sane defaults. Available values for env:
  // local, lan, wan
  c := config.New("local")

  // Callback function. It's called when this node is ready to accept connections.
  ctx, cancel := context.WithCancel(context.Background())
  c.Started = func() {
    defer cancel()
    log.Println("[INFO] Olric is ready to accept connections")
  }

  // Create a new Olric instance.
  db, err := olric.New(c)
  if err != nil {
    log.Fatalf("Failed to create Olric instance: %v", err)
  }

  // Start the instance. It will form a single-node cluster.
  go func() {
    // Call Start at background. It's a blocker call.
    err = db.Start()
    if err != nil {
      log.Fatalf("olric.Start returned an error: %v", err)
    }
  }()

  <-ctx.Done()

  // In embedded-member scenario, you can use the EmbeddedClient. It implements
  // the Client interface.
  e := db.NewEmbeddedClient()

  dm, err := e.NewDMap("bucket-of-arbitrary-items")
  if err != nil {
    log.Fatalf("olric.NewDMap returned an error: %v", err)
  }

  ctx, cancel = context.WithCancel(context.Background())

  // Magic starts here!
  fmt.Println("##")
  fmt.Println("Simple Put/Get on a DMap instance:")
  err = dm.Put(ctx, "my-key", "Olric Rocks!")
  if err != nil {
    log.Fatalf("Failed to call Put: %v", err)
  }

  gr, err := dm.Get(ctx, "my-key")
  if err != nil {
    log.Fatalf("Failed to call Get: %v", err)
  }

  // Olric uses the Redis serialization format.
  value, err := gr.String()
  if err != nil {
    log.Fatalf("Failed to read Get response: %v", err)
  }

  fmt.Println("Response for my-key:", value)
  fmt.Println("##")

  // Don't forget the call Shutdown when you want to leave the cluster.
  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
  defer cancel()

  err = db.Shutdown(ctx)
  if err != nil {
    log.Printf("Failed to shutdown Olric: %v", err)
  }
}
```

#### Publish-Subscribe

```go
package main

import (
  "context"
  "fmt"
  "log"
  "time"

  "github.com/olric-data/olric"
  "github.com/olric-data/olric/config"
)

func main() {
  // Sample for Olric v0.7.x

  // Deployment scenario: embedded-member
  // This creates a single-node Olric cluster. It's good enough for experimenting.

  // config.New returns a new config.Config with sane defaults. Available values for env:
  // local, lan, wan
  c := config.New("local")

  // Callback function. It's called when this node is ready to accept connections.
  ctx, cancel := context.WithCancel(context.Background())
  c.Started = func() {
    defer cancel()
    log.Println("[INFO] Olric is ready to accept connections")
  }

  // Create a new Olric instance.
  db, err := olric.New(c)
  if err != nil {
    log.Fatalf("Failed to create Olric instance: %v", err)
  }

  // Start the instance. It will form a single-node cluster.
  go func() {
    // Call Start at background. It's a blocker call.
    err = db.Start()
    if err != nil {
      log.Fatalf("olric.Start returned an error: %v", err)
    }
  }()

  <-ctx.Done()

  // In embedded-member scenario, you can use the EmbeddedClient. It implements
  // the Client interface.
  e := db.NewEmbeddedClient()

  ps, err := e.NewPubSub()
  if err != nil {
    log.Fatalf("olric.NewPubSub returned an error: %v", err)
  }

  ctx, cancel = context.WithCancel(context.Background())

  // Olric implements a drop-in replacement of Redis Publish-Subscribe messaging
  // system. PubSub client is just a thin layer around go-redis/redis.
  rps := ps.Subscribe(ctx, "my-channel")

  // Get a message to read messages from my-channel
  msg := rps.Channel()

  go func() {
    // Publish a message here.
    _, err := ps.Publish(ctx, "my-channel", "Olric Rocks!")
    if err != nil {
      log.Fatalf("PubSub.Publish returned an error: %v", err)
    }
  }()

  // Consume messages
  rm := <-msg

  fmt.Printf("Received message: \"%s\" from \"%s\"", rm.Channel, rm.Payload)

  // Don't forget the call Shutdown when you want to leave the cluster.
  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
  defer cancel()

  err = e.Close(ctx)
  if err != nil {
    log.Printf("Failed to close EmbeddedClient: %v", err)
  }
}
```

### Client-Server scenario
#### Distributed map

```go
package main

import (
  "context"
  "fmt"
  "log"
  "time"

  "github.com/olric-data/olric"
)

func main() {
  // Sample for Olric v0.7.x

  // Deployment scenario: client-server

  // NewClusterClient takes a list of the nodes. This list may only contain a
  // load balancer address. Please note that Olric nodes will calculate the partition owner
  // and proxy the incoming requests.
  c, err := olric.NewClusterClient([]string{"localhost:3320"})
  if err != nil {
    log.Fatalf("olric.NewClusterClient returned an error: %v", err)
  }

  // In client-server scenario, you can use the ClusterClient. It implements
  // the Client interface.
  dm, err := c.NewDMap("bucket-of-arbitrary-items")
  if err != nil {
    log.Fatalf("olric.NewDMap returned an error: %v", err)
  }

  ctx, cancel := context.WithCancel(context.Background())

  // Magic starts here!
  fmt.Println("##")
  fmt.Println("Simple Put/Get on a DMap instance:")
  err = dm.Put(ctx, "my-key", "Olric Rocks!")
  if err != nil {
    log.Fatalf("Failed to call Put: %v", err)
  }

  gr, err := dm.Get(ctx, "my-key")
  if err != nil {
    log.Fatalf("Failed to call Get: %v", err)
  }

  // Olric uses the Redis serialization format.
  value, err := gr.String()
  if err != nil {
    log.Fatalf("Failed to read Get response: %v", err)
  }

  fmt.Println("Response for my-key:", value)
  fmt.Println("##")

  // Don't forget the call Shutdown when you want to leave the cluster.
  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
  defer cancel()

  err = c.Close(ctx)
  if err != nil {
    log.Printf("Failed to close ClusterClient: %v", err)
  }
}
```

### SCAN on DMaps

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/olric-data/olric"
	"github.com/olric-data/olric/config"
)

func main() {
	// Sample for Olric v0.7.x

	// Deployment scenario: embedded-member
	// This creates a single-node Olric cluster. It's good enough for experimenting.

	// config.New returns a new config.Config with sane defaults. Available values for env:
	// local, lan, wan
	c := config.New("local")

	// Callback function. It's called when this node is ready to accept connections.
	ctx, cancel := context.WithCancel(context.Background())
	c.Started = func() {
		defer cancel()
		log.Println("[INFO] Olric is ready to accept connections")
	}

	// Create a new Olric instance.
	db, err := olric.New(c)
	if err != nil {
		log.Fatalf("Failed to create Olric instance: %v", err)
	}

	// Start the instance. It will form a single-node cluster.
	go func() {
		// Call Start at background. It's a blocker call.
		err = db.Start()
		if err != nil {
			log.Fatalf("olric.Start returned an error: %v", err)
		}
	}()

	<-ctx.Done()

	// In embedded-member scenario, you can use the EmbeddedClient. It implements
	// the Client interface.
	e := db.NewEmbeddedClient()

	dm, err := e.NewDMap("bucket-of-arbitrary-items")
	if err != nil {
		log.Fatalf("olric.NewDMap returned an error: %v", err)
	}

	ctx, cancel = context.WithCancel(context.Background())

	// Magic starts here!
	fmt.Println("##")
	fmt.Println("Insert 10 keys")
	var key string
	for i := 0; i < 10; i++ {
		if i%2 == 0 {
			key = fmt.Sprintf("even:%d", i)
		} else {
			key = fmt.Sprintf("odd:%d", i)
		}
		err = dm.Put(ctx, key, nil)
		if err != nil {
			log.Fatalf("Failed to call Put: %v", err)
		}
	}

	i, err := dm.Scan(ctx)
	if err != nil {
		log.Fatalf("Failed to call Scan: %v", err)
	}

	fmt.Println("Iterate over all the keys")
	for i.Next() {
		fmt.Println(">> Key", i.Key())
	}

	i.Close()

	i, err = dm.Scan(ctx, olric.Match("^even:"))
	if err != nil {
		log.Fatalf("Failed to call Scan: %v", err)
	}

	fmt.Println("\n\nScan with regex: ^even:")
	for i.Next() {
		fmt.Println(">> Key", i.Key())
	}

	i.Close()

	// Don't forget the call Shutdown when you want to leave the cluster.
	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	err = db.Shutdown(ctx)
	if err != nil {
		log.Printf("Failed to shutdown Olric: %v", err)
	}
}
```

#### Publish-Subscribe
```go
package main

import (
  "context"
  "fmt"
  "log"
  "time"

  "github.com/olric-data/olric"
)

func main() {
  // Sample for Olric v0.7.x

  // Deployment scenario: client-server

  // NewClusterClient takes a list of the nodes. This list may only contain a
  // load balancer address. Please note that Olric nodes will calculate the partition owner
  // and proxy the incoming requests.
  c, err := olric.NewClusterClient([]string{"localhost:3320"})
  if err != nil {
    log.Fatalf("olric.NewClusterClient returned an error: %v", err)
  }

  // In client-server scenario, you can use the ClusterClient. It implements
  // the Client interface.
  ps, err := c.NewPubSub()
  if err != nil {
    log.Fatalf("olric.NewPubSub returned an error: %v", err)
  }

  ctx, cancel := context.WithCancel(context.Background())

  // Olric implements a drop-in replacement of Redis Publish-Subscribe messaging
  // system. PubSub client is just a thin layer around go-redis/redis.
  rps := ps.Subscribe(ctx, "my-channel")

  // Get a message to read messages from my-channel
  msg := rps.Channel()

  go func() {
    // Publish a message here.
    _, err := ps.Publish(ctx, "my-channel", "Olric Rocks!")
    if err != nil {
      log.Fatalf("PubSub.Publish returned an error: %v", err)
    }
  }()

  // Consume messages
  rm := <-msg

  fmt.Printf("Received message: \"%s\" from \"%s\"", rm.Channel, rm.Payload)

  // Don't forget the call Shutdown when you want to leave the cluster.
  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
  defer cancel()

  err = c.Close(ctx)
  if err != nil {
    log.Printf("Failed to close ClusterClient: %v", err)
  }
}

```

## Contributions

Please don't hesitate to fork the project and send a pull request or just e-mail me to ask questions and share ideas.

## License

The Apache License, Version 2.0 - see LICENSE for more details.

## About the name

The inner voice of Turgut Özben who is the main character of [Oğuz Atay's masterpiece -The Disconnected-](https://www.themodernnovel.org/asia/other-asia/turkey/oguz-atay/the-disconnected/).


================================================
FILE: auth.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"crypto/subtle"
	"errors"

	"github.com/olric-data/olric/internal/protocol"
	"github.com/olric-data/olric/internal/server"
	"github.com/tidwall/redcon"
)

// authCommandHandler handles AUTH requests sent by clients. It parses the
// command, verifies the provided password against the configured one, and
// marks the connection as authenticated on success.
//
// Responses:
//   - OK when the password matches,
//   - an error when no password is configured on the server,
//   - ErrWrongPass when the password does not match.
func (db *Olric) authCommandHandler(conn redcon.Conn, cmd redcon.Command) {
	authCmd, err := protocol.ParseAuthCommand(cmd)
	if err != nil {
		protocol.WriteError(conn, err)
		return
	}

	if !db.config.Authentication.Enabled() {
		protocol.WriteError(conn, errors.New("AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?"))
		return
	}

	// Compare passwords in constant time to avoid leaking information about
	// the configured password through response-timing differences.
	if subtle.ConstantTimeCompare([]byte(authCmd.Password), []byte(db.config.Authentication.Password)) == 1 {
		// The connection context is attached by the server; flip its
		// authenticated flag so subsequent commands are accepted.
		ctx := conn.Context().(*server.ConnContext)
		ctx.SetAuthenticated(true)
		conn.WriteString(protocol.StatusOK)
		return
	}
	protocol.WriteError(conn, ErrWrongPass)
}


================================================
FILE: auth_test.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"testing"

	"github.com/olric-data/olric/config"
	"github.com/olric-data/olric/internal/testutil"
	"github.com/stretchr/testify/require"
)

// TestAuthCommandHandler_WithPassword exercises AUTH against a member that
// requires a password: correct credentials, wrong credentials, and no
// credentials at all.
func TestAuthCommandHandler_WithPassword(t *testing.T) {
	cluster := newTestOlricCluster(t)
	testConfig := testutil.NewConfig()
	testConfig.Authentication = &config.Authentication{
		Password: "test-password",
	}
	db := cluster.addMemberWithConfig(t, testConfig)

	expectedMessage := "error while discovering the cluster members: wrong password"
	ctx := context.Background()
	t.Run("With correct credentials", func(t *testing.T) {
		c, err := NewClusterClient([]string{db.name}, WithPassword("test-password"))
		require.NoError(t, err)
		defer func() {
			require.NoError(t, c.Close(ctx))
		}()

		response, err := c.Ping(ctx, db.rt.This().String(), "")
		require.NoError(t, err)
		require.Equal(t, DefaultPingResponse, response)
	})

	t.Run("With wrong credentials", func(t *testing.T) {
		_, err := NewClusterClient([]string{db.name}, WithPassword("wrong"))
		require.ErrorContains(t, err, expectedMessage)
	})

	t.Run("Without credentials", func(t *testing.T) {
		// Bug fix: this subtest previously passed WithPassword("wrong"),
		// duplicating the case above instead of testing the no-credentials path.
		_, err := NewClusterClient([]string{db.name})
		require.Error(t, err)
	})
}

// TestAuthCommandHandler_Auth_Disabled verifies that sending AUTH to a member
// that has no password configured fails with a descriptive error.
func TestAuthCommandHandler_Auth_Disabled(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	_, err := NewClusterClient([]string{db.name}, WithPassword("test-password"))
	require.ErrorContains(t, err, "error while discovering the cluster members: AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?")
}


================================================
FILE: client.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"time"

	"github.com/olric-data/olric/internal/dmap"
	"github.com/olric-data/olric/pkg/storage"
	"github.com/olric-data/olric/stats"
)

// DefaultScanCount is the default number of keys fetched per page by SCAN
// when the caller does not provide a Count option.
const DefaultScanCount = 10

// Member denotes a member of the Olric cluster.
type Member struct {
	// Member name in the cluster. It's also host:port of the node.
	Name string

	// ID of the Member in the cluster. Hash of Name and Birthdate of the member.
	ID uint64

	// Birthdate of the member in nanoseconds (Unix time).
	Birthdate int64

	// Coordinator is true if this member is the cluster coordinator. There is
	// only one coordinator member in a healthy cluster.
	Coordinator bool
}

// Iterator defines an interface to implement iterators on the distributed maps.
type Iterator interface {
	// Next returns true if there is at least one more key in the iterator
	// implementation. Otherwise, it returns false.
	Next() bool

	// Key returns a key name from the distributed map.
	Key() string

	// Close stops the iteration and releases allocated resources.
	Close()
}

// LockContext interface defines methods to manage an acquired lock on a
// distributed map.
type LockContext interface {
	// Unlock releases an acquired lock for the given key. It returns ErrNoSuchLock
	// if there is no lock for the given key.
	Unlock(ctx context.Context) error

	// Lease sets or updates the timeout of the acquired lock for the given key.
	// It returns ErrNoSuchLock if there is no lock for the given key.
	Lease(ctx context.Context, duration time.Duration) error
}

// PutOption is a function for defining options to control behavior of the Put command.
type PutOption func(*dmap.PutConfig)

// EX sets the specified expire time, in seconds.
func EX(ex time.Duration) PutOption {
	return func(cfg *dmap.PutConfig) {
		cfg.HasEX = true
		cfg.EX = ex
	}
}

// PX sets the specified expire time, in milliseconds.
func PX(px time.Duration) PutOption {
	return func(cfg *dmap.PutConfig) {
		cfg.HasPX = true
		cfg.PX = px
	}
}

// EXAT sets the specified Unix time at which the key will expire, in seconds.
// NOTE(review): the parameter is a time.Duration that carries an absolute
// Unix timestamp — confirm against dmap.PutConfig's handling of EXAT.
func EXAT(exat time.Duration) PutOption {
	return func(cfg *dmap.PutConfig) {
		cfg.HasEXAT = true
		cfg.EXAT = exat
	}
}

// PXAT sets the specified Unix time at which the key will expire, in milliseconds.
// NOTE(review): like EXAT, the duration carries an absolute timestamp — verify.
func PXAT(pxat time.Duration) PutOption {
	return func(cfg *dmap.PutConfig) {
		cfg.HasPXAT = true
		cfg.PXAT = pxat
	}
}

// NX only sets the key if it does not already exist.
func NX() PutOption {
	return func(cfg *dmap.PutConfig) {
		cfg.HasNX = true
	}
}

// XX only sets the key if it already exists.
func XX() PutOption {
	return func(cfg *dmap.PutConfig) {
		cfg.HasXX = true
	}
}

// dmapConfig holds client-side configuration for a DMap instance.
type dmapConfig struct {
	// storageEntryImplementation constructs storage.Entry values used to
	// decode raw responses coming from the cluster.
	storageEntryImplementation func() storage.Entry
}

// DMapOption is a function for defining options to control behavior of distributed map instances.
type DMapOption func(*dmapConfig)

// StorageEntryImplementation sets an encoder/decoder implementation for your choice of storage engine.
func StorageEntryImplementation(e func() storage.Entry) DMapOption {
	return func(cfg *dmapConfig) {
		cfg.storageEntryImplementation = e
	}
}

// ScanOption is a function for defining options to control behavior of the SCAN command.
type ScanOption func(*dmap.ScanConfig)

// Count sets the user-specified amount of work that should be done at every call in
// order to retrieve elements from the distributed map. This is just a hint for the
// implementation, however generally speaking this is what you could expect most of
// the time from the implementation. The default value is 10 (DefaultScanCount).
func Count(c int) ScanOption {
	return func(cfg *dmap.ScanConfig) {
		cfg.HasCount = true
		cfg.Count = c
	}
}

// Match is used for using regular expressions on keys. See https://pkg.go.dev/regexp
func Match(s string) ScanOption {
	return func(cfg *dmap.ScanConfig) {
		cfg.HasMatch = true
		cfg.Match = s
	}
}

// DMap defines methods to access and manipulate distributed maps.
type DMap interface {
	// Name exposes name of the DMap.
	Name() string

	// Put sets the value for the given key. It overwrites any previous value for
	// that key, and it's thread-safe. The key has to be a string. value type is arbitrary.
	// It is safe to modify the contents of the arguments after Put returns but not before.
	Put(ctx context.Context, key string, value interface{}, options ...PutOption) error

	// Get gets the value for the given key. It returns ErrKeyNotFound if the DB
	// does not contain the key. It's thread-safe. It is safe to modify the contents
	// of the returned value. See GetResponse for the details.
	Get(ctx context.Context, key string) (*GetResponse, error)

	// Delete deletes values for the given keys. Delete will not return error
	// if key doesn't exist. It's thread-safe. It is safe to modify the contents
	// of the argument after Delete returns. The returned int is the number of
	// keys that were removed.
	Delete(ctx context.Context, keys ...string) (int, error)

	// Incr atomically increments the key by delta. The return value is the new value
	// after being incremented or an error.
	Incr(ctx context.Context, key string, delta int) (int, error)

	// Decr atomically decrements the key by delta. The return value is the new value
	// after being decremented or an error.
	Decr(ctx context.Context, key string, delta int) (int, error)

	// GetPut atomically sets the key to value and returns the old value stored at key. It returns nil if there is no
	// previous value.
	GetPut(ctx context.Context, key string, value interface{}) (*GetResponse, error)

	// IncrByFloat atomically increments the key by delta. The return value is the new value
	// after being incremented or an error.
	IncrByFloat(ctx context.Context, key string, delta float64) (float64, error)

	// Expire updates the expiry for the given key. It returns ErrKeyNotFound if
	// the DB does not contain the key. It's thread-safe.
	Expire(ctx context.Context, key string, timeout time.Duration) error

	// Lock sets a lock for the given key. Acquired lock is only for the key in
	// this dmap.
	//
	// It returns immediately if it acquires the lock for the given key. Otherwise,
	// it waits until deadline.
	//
	// You should know that the locks are approximate, and only to be used for
	// non-critical purposes.
	Lock(ctx context.Context, key string, deadline time.Duration) (LockContext, error)

	// LockWithTimeout sets a lock for the given key. If the lock is still
	// unreleased at the end of the given period of time,
	// it automatically releases the lock. Acquired lock is only for the key in
	// this dmap.
	//
	// It returns immediately if it acquires the lock for the given key. Otherwise,
	// it waits until deadline.
	//
	// You should know that the locks are approximate, and only to be used for
	// non-critical purposes.
	LockWithTimeout(ctx context.Context, key string, timeout, deadline time.Duration) (LockContext, error)

	// Scan returns an iterator to loop over the keys.
	//
	// Available scan options:
	//
	// * Count
	// * Match
	Scan(ctx context.Context, options ...ScanOption) (Iterator, error)

	// Destroy flushes the given DMap on the cluster. You should know that there
	// is no global lock on DMaps. So if you call Put/PutEx and Destroy methods
	// concurrently on the cluster, Put call may set new values to the DMap.
	Destroy(ctx context.Context) error

	// Pipeline is a mechanism to realise Redis Pipeline technique.
	//
	// Pipelining is a technique to extremely speed up processing by packing
	// operations to batches, send them at once to Redis and read the replies
	// in a single step.
	// See https://redis.io/topics/pipelining
	//
	// Pay attention, that Pipeline is not a transaction, so you can get unexpected
	// results in case of big pipelines and small read/write timeouts.
	// Redis client has retransmission logic in case of timeouts, pipeline
	// can be retransmitted and commands can be executed more than once.
	Pipeline(opts ...PipelineOption) (*DMapPipeline, error)

	// Close stops background routines and frees allocated resources.
	Close(ctx context.Context) error
}

// PipelineOption is a function for defining options to control behavior of the Pipeline command.
type PipelineOption func(pipeline *DMapPipeline)

// PipelineConcurrency is a PipelineOption controlling the number of concurrent
// goroutines used to flush the pipeline.
func PipelineConcurrency(concurrency int) PipelineOption {
	return func(dp *DMapPipeline) {
		dp.concurrency = concurrency
	}
}

// statsConfig holds options for the STATS command.
type statsConfig struct {
	// CollectRuntime enables collection of Go runtime statistics.
	CollectRuntime bool
}

// StatsOption is a function for defining options to control behavior of the STATS command.
type StatsOption func(*statsConfig)

// CollectRuntime is a StatsOption for collecting Go runtime statistics from a cluster member.
func CollectRuntime() StatsOption {
	return func(cfg *statsConfig) {
		cfg.CollectRuntime = true
	}
}

// pubsubConfig holds options for the Publish-Subscribe service.
type pubsubConfig struct {
	// Address is the host:port of a specific cluster member to publish to.
	Address string
}

// ToAddress is a PubSubOption for using a specific cluster member to publish messages to a channel.
func ToAddress(addr string) PubSubOption {
	return func(cfg *pubsubConfig) {
		cfg.Address = addr
	}
}

// PubSubOption is a function for defining options to control behavior of the Publish-Subscribe service.
type PubSubOption func(option *pubsubConfig)

// Client is an interface that denotes an Olric client. It is implemented by
// both the embedded client and the cluster (network) client.
type Client interface {
	// NewDMap returns a new DMap client with the given options.
	NewDMap(name string, options ...DMapOption) (DMap, error)

	// NewPubSub returns a new PubSub client with the given options.
	NewPubSub(options ...PubSubOption) (*PubSub, error)

	// Stats returns stats.Stats with the given options.
	Stats(ctx context.Context, address string, options ...StatsOption) (stats.Stats, error)

	// Ping sends a ping message to an Olric node. Returns PONG if message is empty,
	// otherwise returns a copy of the message as a bulk. This command is often used to test
	// if a connection is still alive, or to measure latency.
	Ping(ctx context.Context, address, message string) (string, error)

	// RoutingTable returns the latest version of the routing table.
	RoutingTable(ctx context.Context) (RoutingTable, error)

	// Members returns a thread-safe list of cluster members.
	Members(ctx context.Context) ([]Member, error)

	// RefreshMetadata fetches a list of available members and the latest routing
	// table version. It also closes stale clients, if there are any.
	RefreshMetadata(ctx context.Context) error

	// Close stops background routines and frees allocated resources.
	Close(ctx context.Context) error
}


================================================
FILE: cluster.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"fmt"
	"strconv"

	"github.com/olric-data/olric/internal/protocol"
	"github.com/tidwall/redcon"
)

// Route holds the owners of a single partition: the primary owners and the
// replica (backup) owners, as "host:port" strings.
type Route struct {
	PrimaryOwners []string
	ReplicaOwners []string
}

// RoutingTable maps a partition ID to its Route.
type RoutingTable map[uint64]Route

// ownersFromSlice converts a raw RESP array into a list of owner addresses.
// kind is "primary" or "replica" and only affects the error message.
func ownersFromSlice(raw interface{}, kind string) ([]string, error) {
	rawOwners, ok := raw.([]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid %s owners: %v", kind, raw)
	}
	var owners []string
	for _, rawOwner := range rawOwners {
		owner, ok := rawOwner.(string)
		if !ok {
			// Bug fix: previously this printed the zero-valued result of the
			// failed assertion instead of the offending raw value.
			return nil, fmt.Errorf("invalid owner: %v", rawOwner)
		}
		owners = append(owners, owner)
	}
	return owners, nil
}

// mapToRoutingTable decodes the raw response of the CLUSTER.ROUTINGTABLE
// command into a RoutingTable. Each entry is expected to be a 3-element array:
// [partition id, primary owners, replica owners].
func mapToRoutingTable(slice []interface{}) (RoutingTable, error) {
	rt := make(RoutingTable)
	for _, raw := range slice {
		// Validate the entry shape instead of panicking on a malformed response.
		item, ok := raw.([]interface{})
		if !ok || len(item) != 3 {
			return nil, fmt.Errorf("invalid routing table entry: %v", raw)
		}

		// The partition ID may arrive as an int64 or as a decimal string,
		// depending on the transport.
		var partID uint64
		switch v := item[0].(type) {
		case int64:
			partID = uint64(v)
		case string:
			parsed, err := strconv.ParseUint(v, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("invalid partition id: %v: %w", v, err)
			}
			partID = parsed
		default:
			return nil, fmt.Errorf("invalid partition id: %v", item[0])
		}

		r := Route{}
		primaryOwners, err := ownersFromSlice(item[1], "primary")
		if err != nil {
			return nil, err
		}
		r.PrimaryOwners = primaryOwners

		// Bug fix: the replica-owners error previously reported the primary
		// owners value; ownersFromSlice now reports the value it inspected.
		replicaOwners, err := ownersFromSlice(item[2], "replica")
		if err != nil {
			return nil, err
		}
		r.ReplicaOwners = replicaOwners

		rt[partID] = r
	}
	return rt, nil
}

// clusterRoutingTableCommandHandler serves the CLUSTER.ROUTINGTABLE command.
// If this node is the cluster coordinator, it streams the routing table
// directly as one [partID, primary owners, replica owners] entry per
// partition; otherwise it forwards the request to the coordinator and relays
// the response.
func (db *Olric) clusterRoutingTableCommandHandler(conn redcon.Conn, cmd redcon.Command) {
	_, err := protocol.ParseClusterRoutingTable(cmd)
	if err != nil {
		protocol.WriteError(conn, err)
		return
	}
	coordinator := db.rt.Discovery().GetCoordinator()
	if coordinator.CompareByID(db.rt.This()) {
		// This node is the coordinator; write the table straight to the wire.
		conn.WriteArray(int(db.config.PartitionCount))
		rt := db.fillRoutingTable()
		for partID := uint64(0); partID < db.config.PartitionCount; partID++ {
			conn.WriteArray(3)
			conn.WriteUint64(partID)

			r := rt[partID]
			primaryOwners := r.PrimaryOwners
			conn.WriteArray(len(primaryOwners))
			for _, owner := range primaryOwners {
				conn.WriteBulkString(owner)
			}

			replicaOwners := r.ReplicaOwners
			conn.WriteArray(len(replicaOwners))
			for _, owner := range replicaOwners {
				conn.WriteBulkString(owner)
			}
		}
		return
	}

	// Redirect to the cluster coordinator and relay its raw reply.
	rtCmd := protocol.NewClusterRoutingTable().Command(db.ctx)
	rc := db.client.Get(coordinator.String())
	err = rc.Process(db.ctx, rtCmd)
	if err != nil {
		protocol.WriteError(conn, err)
		return
	}
	slice, err := rtCmd.Slice()
	if err != nil {
		protocol.WriteError(conn, err)
		return
	}
	conn.WriteAny(slice)
}

// fillRoutingTable builds a fresh RoutingTable snapshot from the local
// partition metadata, recording the stringified addresses of the primary and
// replica (backup) owners of every partition.
func (db *Olric) fillRoutingTable() RoutingTable {
	table := make(RoutingTable)
	for id := uint64(0); id < db.config.PartitionCount; id++ {
		var route Route
		for _, member := range db.primary.PartitionOwnersByID(id) {
			route.PrimaryOwners = append(route.PrimaryOwners, member.String())
		}
		for _, member := range db.backup.PartitionOwnersByID(id) {
			route.ReplicaOwners = append(route.ReplicaOwners, member.String())
		}
		table[id] = route
	}
	return table
}

// routingTable returns the current routing table. When this node is the
// cluster coordinator the table is built locally; otherwise the request is
// forwarded to the coordinator and its reply is decoded.
func (db *Olric) routingTable(ctx context.Context) (RoutingTable, error) {
	coordinator := db.rt.Discovery().GetCoordinator()
	if coordinator.CompareByID(db.rt.This()) {
		// No network round-trip needed on the coordinator itself.
		return db.fillRoutingTable(), nil
	}

	rtCmd := protocol.NewClusterRoutingTable().Command(ctx)
	conn := db.client.Get(coordinator.String())
	if err := conn.Process(ctx, rtCmd); err != nil {
		return nil, err
	}
	raw, err := rtCmd.Slice()
	if err != nil {
		return nil, err
	}
	return mapToRoutingTable(raw)
}

// clusterMembersCommandHandler serves the CLUSTER.MEMBERS command. It writes
// one [name, birthdate, is-coordinator] entry per known cluster member.
func (db *Olric) clusterMembersCommandHandler(conn redcon.Conn, cmd redcon.Command) {
	_, err := protocol.ParseClusterMembers(cmd)
	if err != nil {
		protocol.WriteError(conn, err)
		return
	}

	coordinator := db.rt.Discovery().GetCoordinator()
	members := db.rt.Discovery().GetMembers()
	conn.WriteArray(len(members))
	for _, member := range members {
		conn.WriteArray(3)
		conn.WriteBulkString(member.Name)
		// go-redis/redis package cannot handle uint64. At the time of this writing,
		// there is no solution for this, and I don't want to use a soft fork to repair it.
		//conn.WriteUint64(member.ID)
		conn.WriteInt64(member.Birthdate)
		// The coordinator flag is serialized as the strings "true"/"false".
		if coordinator.CompareByID(member) {
			conn.WriteBulkString("true")
		} else {
			conn.WriteBulkString("false")
		}
	}
}


================================================
FILE: cluster_client.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net"
	"os"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/olric-data/olric/config"
	"github.com/olric-data/olric/hasher"
	"github.com/olric-data/olric/internal/bufpool"
	"github.com/olric-data/olric/internal/cluster/partitions"
	"github.com/olric-data/olric/internal/discovery"
	"github.com/olric-data/olric/internal/dmap"
	"github.com/olric-data/olric/internal/protocol"
	"github.com/olric-data/olric/internal/ramblock/entry"
	"github.com/olric-data/olric/internal/resp"
	"github.com/olric-data/olric/internal/server"
	"github.com/olric-data/olric/pkg/storage"
	"github.com/olric-data/olric/stats"
	"github.com/redis/go-redis/v9"
)

// pool recycles byte buffers used to encode values before they are written
// to the network, avoiding a fresh allocation per request.
var pool = bufpool.New()

// DefaultRoutingTableFetchInterval is the default value of RoutingTableFetchInterval. ClusterClient implementation
// fetches the routing table from the cluster to route requests to the right partition.
const DefaultRoutingTableFetchInterval = time.Minute

// ClusterLockContext represents a lock acquired through a ClusterDMap. It
// carries the locked key, the opaque token returned by the server, and the
// DMap the lock belongs to.
type ClusterLockContext struct {
	key   string
	token string
	dm    *ClusterDMap
}

// ClusterDMap implements a client for DMaps.
type ClusterDMap struct {
	// name is the DMap name.
	name string
	// newEntry constructs storage.Entry values used to decode raw responses.
	newEntry      func() storage.Entry
	config        *dmapConfig
	client        *server.Client
	clusterClient *ClusterClient
}

// Name exposes name of the DMap.
func (dm *ClusterDMap) Name() string {
	return dm.name
}

// processProtocolError translates low-level protocol and transport errors into
// the application-level errors exposed by this package: nil stays nil,
// redis.Nil becomes ErrKeyNotFound, a refused connection is wrapped with
// ErrConnRefused, and everything else goes through the protocol and DMap
// error converters.
func processProtocolError(err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, redis.Nil) {
		return ErrKeyNotFound
	}
	if errors.Is(err, syscall.ECONNREFUSED) {
		// Bug fix: the previous direct type assertion err.(*net.OpError) panics
		// when ECONNREFUSED arrives wrapped in something other than *net.OpError.
		// errors.As walks the wrap chain safely.
		var opErr *net.OpError
		if errors.As(err, &opErr) {
			return fmt.Errorf("%s %s %s: %w", opErr.Op, opErr.Net, opErr.Addr, ErrConnRefused)
		}
		return fmt.Errorf("%s: %w", err.Error(), ErrConnRefused)
	}
	return convertDMapError(protocol.ConvertError(err))
}

// writePutCommand builds a protocol.Put command for the given key/value pair,
// applying the expiry and existence flags carried by the PutConfig.
func (dm *ClusterDMap) writePutCommand(c *dmap.PutConfig, key string, value []byte) *protocol.Put {
	cmd := protocol.NewPut(dm.name, key, value)

	// Expiry flags are mutually exclusive; first configured one wins,
	// matching the original switch's first-match semantics.
	if c.HasEX {
		cmd.SetEX(c.EX.Seconds())
	} else if c.HasPX {
		cmd.SetPX(c.PX.Milliseconds())
	} else if c.HasEXAT {
		cmd.SetEXAT(c.EXAT.Seconds())
	} else if c.HasPXAT {
		cmd.SetPXAT(c.PXAT.Milliseconds())
	}

	// Existence flags: NX takes precedence over XX when both are set.
	if c.HasNX {
		cmd.SetNX()
	} else if c.HasXX {
		cmd.SetXX()
	}

	return cmd
}

// clientByPartID returns a Redis client connected to the current primary
// owner of the given partition, based on the most recently fetched routing
// table.
func (cl *ClusterClient) clientByPartID(partID uint64) (*redis.Client, error) {
	raw := cl.routingTable.Load()
	if raw == nil {
		return nil, fmt.Errorf("routing table is empty")
	}
	table, ok := raw.(RoutingTable)
	if !ok {
		return nil, fmt.Errorf("routing table is corrupt")
	}

	owners := table[partID].PrimaryOwners
	if len(owners) == 0 {
		return nil, fmt.Errorf("primary owners list for %d is empty", partID)
	}
	// The last entry in the primary owners list is the partition's current owner.
	return cl.client.Get(owners[len(owners)-1]), nil
}

// smartPick returns a client connected to the primary owner of the partition
// that hosts the given key. The partition is derived from the hash of the
// DMap name and the key.
//
// The first parameter was renamed from "dmap" to avoid shadowing the imported
// dmap package inside this function.
func (cl *ClusterClient) smartPick(dmapName, key string) (*redis.Client, error) {
	hkey := partitions.HKey(dmapName, key)
	return cl.clientByPartID(hkey % cl.partitionCount)
}

// Put sets the value for the given key. It overwrites any previous value for
// that key, and it's thread-safe. The key has to be a string. value type is arbitrary.
// It is safe to modify the contents of the arguments after Put returns but not before.
func (dm *ClusterDMap) Put(ctx context.Context, key string, value interface{}, options ...PutOption) error {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return err
	}

	// Encode the value into a pooled buffer to avoid per-call allocations.
	buf := pool.Get()
	defer pool.Put(buf)
	if err := resp.New(buf).Encode(value); err != nil {
		return err
	}

	var cfg dmap.PutConfig
	for _, opt := range options {
		opt(&cfg)
	}
	cmd := dm.writePutCommand(&cfg, key, buf.Bytes()).Command(ctx)

	if err := rc.Process(ctx, cmd); err != nil {
		return processProtocolError(err)
	}
	return processProtocolError(cmd.Err())
}

// makeGetResponse decodes the raw bytes of a GET reply into a GetResponse
// using the DMap's configured storage.Entry implementation.
// NOTE(review): Decode's return value (if any) is not checked here — confirm
// the storage.Entry contract for malformed payloads.
func (dm *ClusterDMap) makeGetResponse(cmd *redis.StringCmd) (*GetResponse, error) {
	raw, err := cmd.Bytes()
	if err != nil {
		return nil, processProtocolError(err)
	}

	e := dm.newEntry()
	e.Decode(raw)
	return &GetResponse{
		entry: e,
	}, nil
}

// Get gets the value for the given key. It returns ErrKeyNotFound if the DB
// does not contain the key. It's thread-safe. It is safe to modify the contents
// of the returned value. See GetResponse for the details.
func (dm *ClusterDMap) Get(ctx context.Context, key string) (*GetResponse, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return nil, err
	}

	cmd := protocol.NewGet(dm.name, key).SetRaw().Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return nil, processProtocolError(err)
	}
	return dm.makeGetResponse(cmd)
}

// Delete deletes values for the given keys. Delete will not return an error if the key doesn't exist.
// It's thread-safe. It is safe to modify the contents of the argument after Delete returns.
// The returned int is the number of keys that were removed.
func (dm *ClusterDMap) Delete(ctx context.Context, keys ...string) (int, error) {
	rc, err := dm.client.Pick()
	if err != nil {
		return 0, err
	}

	cmd := protocol.NewDel(dm.name, keys...).Command(ctx)
	err = rc.Process(ctx, cmd)
	if err != nil {
		return 0, processProtocolError(err)
	}

	res, err := cmd.Uint64()
	if err != nil {
		// Bug fix: previously this returned processProtocolError(cmd.Err()),
		// which can be nil even though Uint64 failed, silently swallowing the
		// error. Propagate the actual error instead.
		return 0, processProtocolError(err)
	}
	return int(res), nil
}

// Incr atomically increments the key by delta. The return value is the new value
// after being incremented or an error.
func (dm *ClusterDMap) Incr(ctx context.Context, key string, delta int) (int, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return 0, err
	}

	cmd := protocol.NewIncr(dm.name, key, delta).Command(ctx)
	err = rc.Process(ctx, cmd)
	if err != nil {
		return 0, processProtocolError(err)
	}
	// Use Int64 instead of Uint64: incrementing by a negative delta can yield a
	// negative counter, which an unsigned parse would reject.
	res, err := cmd.Int64()
	if err != nil {
		// Bug fix: propagate the actual parse/command error instead of
		// cmd.Err(), which may be nil here and would swallow the failure.
		return 0, processProtocolError(err)
	}
	return int(res), nil
}

// Decr atomically decrements the key by delta. The return value is the new value
// after being decremented or an error.
func (dm *ClusterDMap) Decr(ctx context.Context, key string, delta int) (int, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return 0, err
	}

	cmd := protocol.NewDecr(dm.name, key, delta).Command(ctx)
	err = rc.Process(ctx, cmd)
	if err != nil {
		return 0, processProtocolError(err)
	}
	// Use Int64 instead of Uint64: a decrement can legitimately drive the
	// counter negative, which an unsigned parse would reject.
	res, err := cmd.Int64()
	if err != nil {
		// Bug fix: propagate the actual parse/command error instead of
		// cmd.Err(), which may be nil here and would swallow the failure.
		return 0, processProtocolError(err)
	}
	return int(res), nil
}

// GetPut atomically sets the key to value and returns the old value stored at a key. It returns nil if there is no
// previous value.
func (dm *ClusterDMap) GetPut(ctx context.Context, key string, value interface{}) (*GetResponse, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return nil, err
	}

	// Encode the new value into a pooled buffer to avoid per-call allocations.
	valueBuf := pool.Get()
	defer pool.Put(valueBuf)

	enc := resp.New(valueBuf)
	err = enc.Encode(value)
	if err != nil {
		return nil, err
	}

	cmd := protocol.NewGetPut(dm.name, key, valueBuf.Bytes()).SetRaw().Command(ctx)
	err = rc.Process(ctx, cmd)
	err = processProtocolError(err)
	if err != nil {
		// ErrKeyNotFound means the key was set for the first time: there is no
		// previous value, so (nil, nil) is returned by design.
		if errors.Is(err, ErrKeyNotFound) {
			return nil, nil
		}
		return nil, err
	}

	raw, err := cmd.Bytes()
	if err != nil {
		return nil, processProtocolError(err)
	}

	// Decode the previous value into a fresh storage entry.
	e := dm.newEntry()
	e.Decode(raw)
	return &GetResponse{
		entry: e,
	}, nil
}

// IncrByFloat atomically increments the key by delta. The return value is the new value
// after being incremented or an error.
func (dm *ClusterDMap) IncrByFloat(ctx context.Context, key string, delta float64) (float64, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return 0, err
	}

	cmd := protocol.NewIncrByFloat(dm.name, key, delta).Command(ctx)
	err = rc.Process(ctx, cmd)
	if err != nil {
		return 0, processProtocolError(err)
	}
	res, err := cmd.Result()
	if err != nil {
		// Bug fix: previously this returned processProtocolError(cmd.Err()),
		// which can be nil even though Result failed, silently swallowing the
		// error. Propagate the actual error instead.
		return 0, processProtocolError(err)
	}
	return res, nil
}

// Expire updates the expiry for the given key. It returns ErrKeyNotFound if
// the DB does not contain the key. It's thread-safe.
func (dm *ClusterDMap) Expire(ctx context.Context, key string, timeout time.Duration) error {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return err
	}

	cmd := protocol.NewExpire(dm.name, key, timeout).Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return processProtocolError(err)
	}
	return processProtocolError(cmd.Err())
}

// Lock sets a lock for the given key. Acquired lock is only for the key in
// this dmap.
//
// It returns immediately if it acquires the lock for the given key. Otherwise,
// it waits until deadline.
//
// You should know that the locks are approximate and only to be used for
// non-critical purposes.
func (dm *ClusterDMap) Lock(ctx context.Context, key string, deadline time.Duration) (LockContext, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return nil, err
	}

	cmd := protocol.NewLock(dm.name, key, deadline.Seconds()).Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return nil, processProtocolError(err)
	}

	// The server replies with an opaque token that is required to release or
	// lease the lock later.
	token, err := cmd.Bytes()
	if err != nil {
		return nil, processProtocolError(err)
	}
	lockCtx := &ClusterLockContext{
		key:   key,
		token: string(token),
		dm:    dm,
	}
	return lockCtx, nil
}

// LockWithTimeout sets a lock for the given key. If the lock is still unreleased
// at the end of the given period of time, it automatically releases the lock.
// Acquired lock is only for the key in this DMap.
//
// It returns immediately if it acquires the lock for the given key. Otherwise,
// it waits until deadline.
//
// You should know that the locks are approximate and only to be used for
// non-critical purposes.
func (dm *ClusterDMap) LockWithTimeout(ctx context.Context, key string, timeout, deadline time.Duration) (LockContext, error) {
	rc, err := dm.clusterClient.smartPick(dm.name, key)
	if err != nil {
		return nil, err
	}

	// SetPX attaches the auto-release timeout, in milliseconds.
	cmd := protocol.NewLock(dm.name, key, deadline.Seconds()).SetPX(timeout.Milliseconds()).Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return nil, processProtocolError(err)
	}

	// The server replies with an opaque token required to release or lease the lock.
	token, err := cmd.Bytes()
	if err != nil {
		return nil, processProtocolError(err)
	}

	lockCtx := &ClusterLockContext{
		key:   key,
		token: string(token),
		dm:    dm,
	}
	return lockCtx, nil
}

// Close stops background routines and frees allocated resources. For
// ClusterDMap this is currently a no-op.
func (dm *ClusterDMap) Close(_ context.Context) error {
	return nil
}

// Unlock releases the distributed lock associated with the current context by
// sending the stored token back to the partition owner.
func (c *ClusterLockContext) Unlock(ctx context.Context) error {
	rc, err := c.dm.clusterClient.smartPick(c.dm.name, c.key)
	if err != nil {
		return err
	}

	cmd := protocol.NewUnlock(c.dm.name, c.key, c.token).Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return processProtocolError(err)
	}
	return processProtocolError(cmd.Err())
}

// Lease extends the lease of the distributed lock associated with the context
// for the specified duration, identified by the stored token.
func (c *ClusterLockContext) Lease(ctx context.Context, duration time.Duration) error {
	rc, err := c.dm.clusterClient.smartPick(c.dm.name, c.key)
	if err != nil {
		return err
	}

	cmd := protocol.NewLockLease(c.dm.name, c.key, c.token, duration.Seconds()).Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return processProtocolError(err)
	}
	return processProtocolError(cmd.Err())
}

// Scan returns an iterator to loop over the keys.
//
// Available scan options:
//
// * Count
// * Match
func (dm *ClusterDMap) Scan(ctx context.Context, options ...ScanOption) (Iterator, error) {
	var sc dmap.ScanConfig
	for _, opt := range options {
		opt(&sc)
	}
	if sc.Count == 0 {
		sc.Count = DefaultScanCount
	}

	// The iterator owns a derived context so it can cancel its background work.
	ictx, cancel := context.WithCancel(ctx)
	i := &ClusterIterator{
		dm:            dm,
		clusterClient: dm.clusterClient,
		config:        &sc,
		logger:        dm.clusterClient.logger,
		partitionKeys: make(map[string]struct{}),
		cursors:       make(map[uint64]map[string]*currentCursor),
		ctx:           ictx,
		cancel:        cancel,
	}

	// Embedded iterator uses a slightly different scan function.
	i.scanner = i.scanOnOwners

	if err := i.fetchRoutingTable(); err != nil {
		return nil, err
	}
	// Load the route for the first partition (0) to scan.
	i.loadRoute()

	// Keep the routing table fresh in the background while iterating.
	i.wg.Add(1)
	go i.fetchRoutingTablePeriodically()

	return i, nil
}

// Destroy flushes the given DMap on the cluster. You should know that there
// is no global lock on DMaps. So if you call Put/PutEx and Destroy methods
// concurrently on the cluster, Put call may set new values to the DMap.
func (dm *ClusterDMap) Destroy(ctx context.Context) error {
	rc, err := dm.client.Pick()
	if err != nil {
		return err
	}

	cmd := protocol.NewDestroy(dm.name).Command(ctx)
	if err := rc.Process(ctx, cmd); err != nil {
		return processProtocolError(err)
	}
	return processProtocolError(cmd.Err())
}

// ClusterClient is a client for managing and interacting with a distributed cluster of nodes.
type ClusterClient struct {
	client         *server.Client       // pooled connections to cluster members, keyed by address
	config         *clusterClientConfig // immutable configuration built by NewClusterClient
	logger         *log.Logger
	routingTable   atomic.Value // holds the latest RoutingTable; replaced atomically on refresh
	partitionCount uint64       // set once on the first routing table fetch (see fetchRoutingTable)
	wg             sync.WaitGroup       // tracks background workers, e.g. fetchRoutingTablePeriodically
	ctx            context.Context      // canceled by Close to stop background workers
	cancel         context.CancelFunc
}

// Ping sends a ping message to an Olric node. Returns PONG if a message is empty,
// otherwise returns a copy of the message as bulk. This command is often used to test
// if a connection is still alive or to measure latency.
func (cl *ClusterClient) Ping(ctx context.Context, addr, message string) (string, error) {
	pingCmd := protocol.NewPing()
	if message != "" {
		pingCmd.SetMessage(message)
	}
	cmd := pingCmd.Command(ctx)

	rc := cl.client.Get(addr)
	err := rc.Process(ctx, cmd)
	if err != nil {
		return "", processProtocolError(err)
	}
	err = processProtocolError(cmd.Err())
	if err != nil {
		// Propagate the command error to the caller. The previous code
		// returned "", nil here, silently masking the failure.
		return "", err
	}

	return cmd.Result()
}

// RoutingTable returns the latest version of the routing table.
func (cl *ClusterClient) RoutingTable(ctx context.Context) (RoutingTable, error) {
	rc, err := cl.client.Pick()
	if err != nil {
		return RoutingTable{}, err
	}

	cmd := protocol.NewClusterRoutingTable().Command(ctx)
	if err = rc.Process(ctx, cmd); err != nil {
		return RoutingTable{}, processProtocolError(err)
	}
	if err = cmd.Err(); err != nil {
		return RoutingTable{}, processProtocolError(err)
	}

	// The reply is a nested slice; convert it into a RoutingTable value.
	raw, err := cmd.Slice()
	if err != nil {
		return RoutingTable{}, processProtocolError(err)
	}
	return mapToRoutingTable(raw)
}

// Stats returns stats.Stats with the given options. The stats are requested
// from the node at the given address and decoded from their JSON representation.
func (cl *ClusterClient) Stats(ctx context.Context, address string, options ...StatsOption) (stats.Stats, error) {
	var cfg statsConfig
	for _, apply := range options {
		apply(&cfg)
	}

	statsCmd := protocol.NewStats()
	if cfg.CollectRuntime {
		statsCmd.SetCollectRuntime()
	}

	cmd := statsCmd.Command(ctx)
	rc := cl.client.Get(address)

	if err := rc.Process(ctx, cmd); err != nil {
		return stats.Stats{}, processProtocolError(err)
	}
	if err := cmd.Err(); err != nil {
		return stats.Stats{}, processProtocolError(err)
	}

	raw, err := cmd.Bytes()
	if err != nil {
		return stats.Stats{}, processProtocolError(err)
	}

	var result stats.Stats
	if err := json.Unmarshal(raw, &result); err != nil {
		return stats.Stats{}, processProtocolError(err)
	}
	return result, nil
}

// Members returns a thread-safe list of cluster members.
func (cl *ClusterClient) Members(ctx context.Context) ([]Member, error) {
	rc, err := cl.client.Pick()
	if err != nil {
		return []Member{}, err
	}

	cmd := protocol.NewClusterMembers().Command(ctx)
	err = rc.Process(ctx, cmd)
	if err != nil {
		return []Member{}, processProtocolError(err)
	}

	if err = cmd.Err(); err != nil {
		return []Member{}, processProtocolError(err)
	}

	items, err := cmd.Slice()
	if err != nil {
		return []Member{}, processProtocolError(err)
	}
	var members []Member
	for _, rawItem := range items {
		// Validate the reply shape instead of panicking on a failed type
		// assertion when the server sends an unexpected payload.
		item, ok := rawItem.([]interface{})
		if !ok || len(item) < 3 {
			return []Member{}, fmt.Errorf("unexpected CLUSTER.MEMBERS reply item: %v", rawItem)
		}
		name, ok := item[0].(string)
		if !ok {
			return []Member{}, fmt.Errorf("unexpected member name in CLUSTER.MEMBERS reply: %v", item[0])
		}
		birthdate, ok := item[1].(int64)
		if !ok {
			return []Member{}, fmt.Errorf("unexpected member birthdate in CLUSTER.MEMBERS reply: %v", item[1])
		}

		m := Member{
			Name:      name,
			Birthdate: birthdate,
		}

		// go-redis/redis package cannot handle uint64 type. At the time of this writing,
		// there is no solution for this, and I don't want to use a soft fork to repair it.
		m.ID = discovery.MemberID(m.Name, m.Birthdate)

		if item[2] == "true" {
			m.Coordinator = true
		}
		members = append(members, m)
	}
	return members, nil
}

// RefreshMetadata fetches a list of available members and the latest routing
// table version. It also closes stale clients if there are any.
func (cl *ClusterClient) RefreshMetadata(ctx context.Context) error {
	// Fetch a list of currently available cluster members. Connection-refused
	// errors are retried because the member list is required to make progress.
	var members []Member
	var err error
	for {
		members, err = cl.Members(ctx)
		if errors.Is(err, ErrConnRefused) {
			// Stop retrying once the caller's context is canceled or its
			// deadline expires; the previous code could busy-spin forever.
			if ctxErr := ctx.Err(); ctxErr != nil {
				return ctxErr
			}
			continue
		}
		if err != nil {
			return err
		}
		break
	}
	// Use a map for fast access.
	addresses := make(map[string]struct{})
	for _, member := range members {
		addresses[member.Name] = struct{}{}
	}

	// Clean stale client connections
	for addr := range cl.client.Addresses() {
		if _, ok := addresses[addr]; !ok {
			// Gone
			if err := cl.client.Close(addr); err != nil {
				return err
			}
		}
	}

	// Re-fetch the routing table, we should use the latest routing table version.
	return cl.fetchRoutingTable()
}

// Close stops background routines and frees allocated resources.
// Calling it on an already-closed client is a no-op.
func (cl *ClusterClient) Close(ctx context.Context) error {
	// Already closed? cl.ctx is canceled exactly once, by this method.
	select {
	case <-cl.ctx.Done():
		return nil
	default:
	}

	// Signal the background workers (fetchRoutingTablePeriodically) to stop
	// and wait until they exit.
	cl.cancel()
	cl.wg.Wait()

	// Close the underlying TCP sockets gracefully.
	return cl.client.Shutdown(ctx)
}

// NewPubSub returns a new PubSub client with the given options.
// The returned client shares this ClusterClient's underlying connection pool.
func (cl *ClusterClient) NewPubSub(options ...PubSubOption) (*PubSub, error) {
	return newPubSub(cl.client, options...)
}

// NewDMap returns a new DMap client with the given options.
func (cl *ClusterClient) NewDMap(name string, options ...DMapOption) (DMap, error) {
	var cfg dmapConfig
	for _, apply := range options {
		apply(&cfg)
	}

	// Fall back to the default storage entry implementation when none is given.
	if cfg.storageEntryImplementation == nil {
		cfg.storageEntryImplementation = func() storage.Entry {
			return entry.New()
		}
	}

	dm := &ClusterDMap{
		name:          name,
		config:        &cfg,
		newEntry:      cfg.storageEntryImplementation,
		client:        cl.client,
		clusterClient: cl,
	}
	return dm, nil
}

// ClusterClientOption is a functional option for configuring a clusterClientConfig instance.
type ClusterClientOption func(c *clusterClientConfig)

// clusterClientConfig holds the configuration required to initialize and manage a cluster client instance.
// Unset fields are filled with defaults by NewClusterClient.
type clusterClientConfig struct {
	logger                    *log.Logger            // defaults to a log.Lshortfile logger on stderr
	config                    *config.Client         // defaults to config.NewClient()
	authentication            *config.Authentication // when set, overrides config.Authentication
	hasher                    hasher.Hasher          // defaults to hasher.NewDefaultHasher()
	routingTableFetchInterval time.Duration          // defaults to DefaultRoutingTableFetchInterval
}

// WithHasher sets a custom hasher implementation to the cluster client configuration.
// When omitted, NewClusterClient falls back to hasher.NewDefaultHasher().
func WithHasher(h hasher.Hasher) ClusterClientOption {
	return func(cfg *clusterClientConfig) {
		cfg.hasher = h
	}
}

// WithLogger sets a custom logger for the cluster client configuration.
func WithLogger(l *log.Logger) ClusterClientOption {
	return func(cfg *clusterClientConfig) {
		cfg.logger = l
	}
}

// WithConfig applies a specified config.Client to the clusterClientConfig.
func WithConfig(c *config.Client) ClusterClientOption {
	return func(cfg *clusterClientConfig) {
		cfg.config = c
	}
}

// WithPassword configures a cluster client with the specified password for authentication.
// It takes precedence over any Authentication already present in the config.Client.
func WithPassword(password string) ClusterClientOption {
	return func(cfg *clusterClientConfig) {
		cfg.authentication = &config.Authentication{
			Password: password,
		}
	}
}

// WithRoutingTableFetchInterval sets the interval for periodic fetching of the routing table in a cluster client configuration.
// Non-positive values fall back to DefaultRoutingTableFetchInterval.
func WithRoutingTableFetchInterval(interval time.Duration) ClusterClientOption {
	return func(cfg *clusterClientConfig) {
		cfg.routingTableFetchInterval = interval
	}
}

// fetchRoutingTable updates the cluster routing table by fetching the latest
// version from the cluster. On the first successful fetch it also records the
// partition count, which is a cluster-wide constant. Returns an error if
// fetching fails.
func (cl *ClusterClient) fetchRoutingTable() error {
	ctx, cancel := context.WithCancel(cl.ctx)
	defer cancel()

	table, err := cl.RoutingTable(ctx)
	if err != nil {
		return fmt.Errorf("error while loading the routing table: %w", err)
	}

	if cl.routingTable.Load() == nil {
		// First run. Partition count is a constant, actually. It has to be greater than zero.
		cl.partitionCount = uint64(len(table))
	}
	cl.routingTable.Store(table)
	return nil
}

// fetchRoutingTablePeriodically refreshes the routing table at the configured
// interval until the client's context is canceled. A failed fetch is logged
// and retried on the next tick.
func (cl *ClusterClient) fetchRoutingTablePeriodically() {
	defer cl.wg.Done()

	ticker := time.NewTicker(cl.config.routingTableFetchInterval)
	defer ticker.Stop()

	for {
		select {
		case <-cl.ctx.Done():
			return
		case <-ticker.C:
			if err := cl.fetchRoutingTable(); err != nil {
				cl.logger.Printf("[ERROR] Failed to fetch the latest version of the routing table: %s", err)
			}
		}
	}
}

// NewClusterClient creates a new Client instance. It needs one node address at least to discover the whole cluster.
func NewClusterClient(addresses []string, options ...ClusterClientOption) (*ClusterClient, error) {
	if len(addresses) == 0 {
		return nil, fmt.Errorf("addresses cannot be empty")
	}

	var cc clusterClientConfig
	for _, opt := range options {
		opt(&cc)
	}

	// Fill in defaults for any option that was not provided.
	if cc.hasher == nil {
		cc.hasher = hasher.NewDefaultHasher()
	}

	if cc.logger == nil {
		cc.logger = log.New(os.Stderr, "logger: ", log.Lshortfile)
	}

	if cc.config == nil {
		cc.config = config.NewClient()
	}

	if cc.authentication != nil {
		cc.config.Authentication = cc.authentication
	}

	if cc.routingTableFetchInterval <= 0 {
		cc.routingTableFetchInterval = DefaultRoutingTableFetchInterval
	}

	if err := cc.config.Sanitize(); err != nil {
		return nil, err
	}
	if err := cc.config.Validate(); err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.Background())
	cl := &ClusterClient{
		client: server.NewClient(cc.config),
		config: &cc,
		logger: cc.logger,
		ctx:    ctx,
		cancel: cancel,
	}

	// Release the context and close the TCP sockets if initialization fails
	// below; otherwise both would leak. Use a fresh context for the shutdown
	// because cl.ctx is already canceled at that point.
	cleanup := func() {
		cl.cancel()
		_ = cl.client.Shutdown(context.Background())
	}

	// Initialize clients for the given cluster members.
	for _, address := range addresses {
		cl.client.Get(address)
	}

	// Discover all cluster members
	members, err := cl.Members(ctx)
	if err != nil {
		cleanup()
		return nil, fmt.Errorf("error while discovering the cluster members: %w", err)
	}
	for _, member := range members {
		cl.client.Get(member.Name)
	}

	// Hash function is required to target primary owners instead of random cluster members.
	partitions.SetHashFunc(cc.hasher)

	// Initial fetch. ClusterClient targets the primary owners for a smooth and quick operation.
	if err := cl.fetchRoutingTable(); err != nil {
		cleanup()
		return nil, err
	}

	// Keep the routing table fresh at cc.routingTableFetchInterval.
	cl.wg.Add(1)
	go cl.fetchRoutingTablePeriodically()

	return cl, nil
}

// Compile-time checks: ClusterClient must satisfy Client and ClusterDMap must satisfy DMap.
var (
	_ Client = (*ClusterClient)(nil)
	_ DMap   = (*ClusterDMap)(nil)
)


================================================
FILE: cluster_client_test.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"log"
	"os"
	"testing"
	"time"

	"github.com/olric-data/olric/config"
	"github.com/olric-data/olric/hasher"
	"github.com/olric-data/olric/internal/testutil"
	"github.com/olric-data/olric/stats"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
)

// TestClusterClient_Ping verifies that PING with an empty message returns DefaultPingResponse.
func TestClusterClient_Ping(t *testing.T) {
	cluster := newTestOlricCluster(t)
	cluster.addMember(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	response, err := c.Ping(ctx, db.rt.This().String(), "")
	require.NoError(t, err)
	require.Equal(t, DefaultPingResponse, response)
}

// TestClusterClient_Ping_WithMessage verifies that PING echoes a non-empty message back.
func TestClusterClient_Ping_WithMessage(t *testing.T) {
	cluster := newTestOlricCluster(t)
	cluster.addMember(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	message := "Olric is the best!"
	result, err := c.Ping(ctx, db.rt.This().String(), message)
	require.NoError(t, err)
	require.Equal(t, message, result)
}

// TestClusterClient_RoutingTable verifies that a single-member cluster exposes one route per partition.
func TestClusterClient_RoutingTable(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	rt, err := c.RoutingTable(ctx)
	require.NoError(t, err)

	require.Len(t, rt, int(db.config.PartitionCount))
}

// TestClusterClient_RoutingTable_Cluster verifies the routing table size on a two-member cluster.
func TestClusterClient_RoutingTable_Cluster(t *testing.T) {
	cluster := newTestOlricCluster(t)
	cluster.addMember(t) // Cluster coordinator
	<-time.After(250 * time.Millisecond)

	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	rt, err := c.RoutingTable(ctx)
	require.NoError(t, err)

	require.Len(t, rt, int(db.config.PartitionCount))
}

// TestClusterClient_Put verifies that a simple Put succeeds.
func TestClusterClient_Put(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue")
	require.NoError(t, err)
}

// TestClusterClient_Get verifies that Get returns the value stored by Put.
func TestClusterClient_Get(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue")
	require.NoError(t, err)

	gr, err := dm.Get(ctx, "mykey")
	require.NoError(t, err)

	res, err := gr.String()
	require.NoError(t, err)

	require.Equal(t, res, "myvalue")
}

// TestClusterClient_Delete verifies that Delete removes a key and reports one deletion.
func TestClusterClient_Delete(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue")
	require.NoError(t, err)

	count, err := dm.Delete(ctx, "mykey")
	require.NoError(t, err)
	require.Equal(t, 1, count)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Delete_Many_Keys verifies that a variadic Delete reports the number of removed keys.
func TestClusterClient_Delete_Many_Keys(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	var keys []string
	for i := 0; i < 10; i++ {
		key := testutil.ToKey(i)
		err = dm.Put(context.Background(), key, "myvalue")
		require.NoError(t, err)
		keys = append(keys, key)
	}

	count, err := dm.Delete(context.Background(), keys...)
	require.NoError(t, err)
	require.Equal(t, 10, count)
}

// TestClusterClient_Destroy verifies that Destroy flushes the DMap so previous keys are gone.
func TestClusterClient_Destroy(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue")
	require.NoError(t, err)

	err = dm.Destroy(ctx)
	require.NoError(t, err)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Incr runs ten concurrent increments and checks the final counter value.
func TestClusterClient_Incr(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	var errGr errgroup.Group
	for i := 0; i < 10; i++ {
		errGr.Go(func() error {
			// Use a goroutine-local error; assigning to the enclosing err
			// from multiple goroutines is a data race.
			_, incrErr := dm.Incr(ctx, "mykey", 1)
			return incrErr
		})
	}

	require.NoError(t, errGr.Wait())

	result, err := dm.Incr(ctx, "mykey", 1)
	require.NoError(t, err)
	require.Equal(t, 11, result)
}

// TestClusterClient_IncrByFloat runs ten concurrent float increments and checks the final value.
func TestClusterClient_IncrByFloat(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	var errGr errgroup.Group
	for i := 0; i < 10; i++ {
		errGr.Go(func() error {
			// Use a goroutine-local error; assigning to the enclosing err
			// from multiple goroutines is a data race.
			_, incrErr := dm.IncrByFloat(ctx, "mykey", 1.2)
			return incrErr
		})
	}

	require.NoError(t, errGr.Wait())

	result, err := dm.IncrByFloat(ctx, "mykey", 1.2)
	require.NoError(t, err)
	require.Equal(t, 13.199999999999998, result)
}

// TestClusterClient_Decr runs ten concurrent decrements and checks the final counter value.
func TestClusterClient_Decr(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", 11)
	require.NoError(t, err)

	var errGr errgroup.Group
	for i := 0; i < 10; i++ {
		errGr.Go(func() error {
			// Use a goroutine-local error; assigning to the enclosing err
			// from multiple goroutines is a data race.
			_, decrErr := dm.Decr(ctx, "mykey", 1)
			return decrErr
		})
	}

	require.NoError(t, errGr.Wait())

	result, err := dm.Decr(ctx, "mykey", 1)
	require.NoError(t, err)
	require.Equal(t, 0, result)
}

// TestClusterClient_GetPut verifies that GetPut returns nil on first write and the previous value afterwards.
func TestClusterClient_GetPut(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	gr, err := dm.GetPut(ctx, "mykey", "myvalue")
	require.NoError(t, err)
	require.Nil(t, gr)

	gr, err = dm.GetPut(ctx, "mykey", "myvalue-2")
	require.NoError(t, err)

	value, err := gr.String()
	require.NoError(t, err)
	require.Equal(t, "myvalue", value)
}

// TestClusterClient_Expire verifies that an Expire call makes the key vanish after the timeout.
func TestClusterClient_Expire(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue")
	require.NoError(t, err)

	err = dm.Expire(ctx, "mykey", time.Millisecond)
	require.NoError(t, err)

	<-time.After(time.Millisecond)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Lock_Unlock verifies the basic lock/unlock round trip.
func TestClusterClient_Lock_Unlock(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	lx, err := dm.Lock(ctx, "lock.foo.key", time.Second)
	require.NoError(t, err)

	err = lx.Unlock(ctx)
	require.NoError(t, err)
}

// TestClusterClient_Lock_Lease verifies that shortening a lease expires the lock before Unlock.
func TestClusterClient_Lock_Lease(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	lx, err := dm.Lock(ctx, "lock.foo.key", time.Second)
	require.NoError(t, err)

	err = lx.Lease(ctx, time.Millisecond)
	require.NoError(t, err)

	<-time.After(time.Millisecond)

	err = lx.Unlock(ctx)
	require.ErrorIs(t, err, ErrNoSuchLock)
}

// TestClusterClient_Lock_ErrLockNotAcquired verifies that a second Lock on a held key fails.
func TestClusterClient_Lock_ErrLockNotAcquired(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	_, err = dm.Lock(ctx, "lock.foo.key", time.Second)
	require.NoError(t, err)

	_, err = dm.Lock(ctx, "lock.foo.key", time.Millisecond)
	require.ErrorIs(t, err, ErrLockNotAcquired)
}

// TestClusterClient_LockWithTimeout verifies the lock-with-timeout/unlock round trip.
func TestClusterClient_LockWithTimeout(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	lx, err := dm.LockWithTimeout(ctx, "lock.foo.key", time.Hour, time.Second)
	require.NoError(t, err)

	err = lx.Unlock(ctx)
	require.NoError(t, err)
}

// TestClusterClient_LockWithTimeout_ErrNoSuchLock verifies that Unlock after expiry returns ErrNoSuchLock.
func TestClusterClient_LockWithTimeout_ErrNoSuchLock(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	lx, err := dm.LockWithTimeout(ctx, "lock.foo.key", time.Millisecond, time.Second)
	require.NoError(t, err)

	<-time.After(time.Millisecond)

	err = lx.Unlock(ctx)
	require.ErrorIs(t, err, ErrNoSuchLock)
}

// TestClusterClient_LockWithTimeout_Then_Lease verifies that Lease extends a short lock timeout.
func TestClusterClient_LockWithTimeout_Then_Lease(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	lx, err := dm.LockWithTimeout(ctx, "lock.foo.key", 50*time.Millisecond, time.Second)
	require.NoError(t, err)

	// Expand its timeout value
	err = lx.Lease(ctx, time.Hour)
	require.NoError(t, err)

	<-time.After(100 * time.Millisecond)

	_, err = dm.Lock(ctx, "lock.foo.key", time.Millisecond)
	require.ErrorIs(t, err, ErrLockNotAcquired)
}
// TestClusterClient_LockWithTimeout_ErrLockNotAcquired verifies that Lock fails while another
// lock with a long timeout is held on the same key.
func TestClusterClient_LockWithTimeout_ErrLockNotAcquired(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	_, err = dm.LockWithTimeout(ctx, "lock.foo.key", time.Hour, time.Second)
	require.NoError(t, err)

	_, err = dm.Lock(ctx, "lock.foo.key", time.Millisecond)
	// Use ErrorIs for consistency with the other lock tests; require.Equal had
	// its expected/actual arguments swapped and does not unwrap errors.
	require.ErrorIs(t, err, ErrLockNotAcquired)
}

// TestClusterClient_Put_Ex verifies that a key stored with EX expires after the given seconds.
func TestClusterClient_Put_Ex(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue", EX(time.Second))
	require.NoError(t, err)

	<-time.After(time.Second)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Put_PX verifies that a key stored with PX expires after the given milliseconds.
func TestClusterClient_Put_PX(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue", PX(time.Millisecond))
	require.NoError(t, err)

	<-time.After(time.Millisecond)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Put_EXAT verifies that a key stored with an absolute EXAT deadline expires.
func TestClusterClient_Put_EXAT(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue", EXAT(time.Duration(time.Now().Add(time.Second).UnixNano())))
	require.NoError(t, err)

	<-time.After(time.Second)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Put_PXAT verifies that a key stored with an absolute PXAT deadline expires.
func TestClusterClient_Put_PXAT(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue", PXAT(time.Duration(time.Now().Add(time.Millisecond).UnixNano())))
	require.NoError(t, err)

	<-time.After(time.Millisecond)

	_, err = dm.Get(ctx, "mykey")
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Put_NX verifies that NX refuses to overwrite an existing key.
func TestClusterClient_Put_NX(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue-2", NX())
	require.ErrorIs(t, err, ErrKeyFound)

	gr, err := dm.Get(ctx, "mykey")
	require.NoError(t, err)

	value, err := gr.String()
	require.NoError(t, err)
	require.Equal(t, "myvalue", value)
}

// TestClusterClient_Put_XX verifies that XX refuses to create a key that does not exist.
func TestClusterClient_Put_XX(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	err = dm.Put(ctx, "mykey", "myvalue-2", XX())
	require.ErrorIs(t, err, ErrKeyNotFound)
}

// TestClusterClient_Stats verifies that Stats returns non-empty data without runtime stats by default.
func TestClusterClient_Stats(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	var empty stats.Stats
	s, err := c.Stats(ctx, db.rt.This().String())
	require.NoError(t, err)
	require.Nil(t, s.Runtime)
	require.NotEqual(t, empty, s)
}

// TestClusterClient_Stats_Cluster verifies that Stats targets the requested member in a cluster.
func TestClusterClient_Stats_Cluster(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)
	db2 := cluster.addMember(t)

	<-time.After(250 * time.Millisecond)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	var empty stats.Stats
	s, err := c.Stats(ctx, db2.rt.This().String())
	require.NoError(t, err)
	require.Nil(t, s.Runtime)
	require.NotEqual(t, empty, s)
	require.Equal(t, db2.rt.This().String(), s.Member.String())
}

// TestClusterClient_Stats_CollectRuntime verifies that the CollectRuntime option populates runtime stats.
func TestClusterClient_Stats_CollectRuntime(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	var empty stats.Stats
	s, err := c.Stats(ctx, db.rt.This().String(), CollectRuntime())
	require.NoError(t, err)
	require.NotNil(t, s.Runtime)
	require.NotEqual(t, empty, s)
}

// TestClusterClient_Set_Options verifies that WithConfig and WithLogger are stored in the client config.
func TestClusterClient_Set_Options(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()

	lg := log.New(os.Stderr, "logger: ", log.Lshortfile)
	cfg := config.NewClient()
	c, err := NewClusterClient([]string{db.name}, WithConfig(cfg), WithLogger(lg))
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	require.Equal(t, cfg, c.config.config)
	require.Equal(t, lg, c.config.logger)
}

// TestClusterClient_Members verifies member discovery and the coordinator flag on a two-member cluster.
func TestClusterClient_Members(t *testing.T) {
	cluster := newTestOlricCluster(t)
	cluster.addMember(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	members, err := c.Members(ctx)
	require.NoError(t, err)
	require.Len(t, members, 2)

	coordinator := db.rt.Discovery().GetCoordinator()
	for _, member := range members {
		require.NotEqual(t, "", member.Name)
		require.NotEqual(t, 0, member.ID)
		require.NotEqual(t, 0, member.Birthdate)
		if coordinator.ID == member.ID {
			require.True(t, member.Coordinator)
		} else {
			require.False(t, member.Coordinator)
		}
	}
}

// TestClusterClient_smartPick verifies that keys are routed to all four members of the cluster.
func TestClusterClient_smartPick(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db1 := cluster.addMember(t)
	db2 := cluster.addMember(t)
	db3 := cluster.addMember(t)
	db4 := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient(
		[]string{db1.name, db2.name, db3.name, db4.name},
		WithHasher(hasher.NewDefaultHasher()),
	)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	clients := make(map[string]struct{})
	for i := 0; i < 1000; i++ {
		rc, err := c.smartPick("mydmap", testutil.ToKey(i))
		require.NoError(t, err)
		clients[rc.String()] = struct{}{}
	}
	require.Len(t, clients, 4)
}


================================================
FILE: cluster_iterator.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"log"
	"sync"
	"time"

	"github.com/olric-data/olric/internal/dmap"
	"github.com/olric-data/olric/internal/protocol"
)

// currentCursor tracks the last SCAN cursor returned by a partition owner.
// Primary and replica scans keep separate cursors (see updateCursor/loadCursor).
type currentCursor struct {
	primary uint64
	replica uint64
}

// ClusterIterator implements distributed query on DMaps.
type ClusterIterator struct {
	mtx             sync.Mutex // protects pos and page
	routingTableMtx sync.Mutex // protects routingTable and partitionCount

	logger         *log.Logger
	dm             *ClusterDMap
	clusterClient  *ClusterClient
	pos            int      // read position within page
	page           []string // keys fetched for the current partition, deduplicated
	route          *Route   // cached route for the current partition (set by loadRoute)
	partitionKeys  map[string]struct{}              // keys already emitted, used for deduplication
	cursors        map[uint64]map[string]*currentCursor // SCAN cursors per partition per owner
	partID         uint64 // current partition id
	routingTable   RoutingTable
	partitionCount uint64
	config         *dmap.ScanConfig
	scanner        func() error // scanOnOwners for cluster iteration; the embedded iterator sets its own
	wg             sync.WaitGroup
	ctx            context.Context
	cancel         context.CancelFunc
}

// loadRoute caches the Route of the current partition under the routing table
// lock. It panics when the partition is missing from the routing table, which
// would indicate an inconsistent table.
func (i *ClusterIterator) loadRoute() {
	i.routingTableMtx.Lock()
	defer i.routingTableMtx.Unlock()

	if route, ok := i.routingTable[i.partID]; ok {
		i.route = &route
		return
	}
	panic("partID: could not be found in the routing table")
}

// updateCursor records the latest SCAN cursor for the given owner on the
// current partition. The replica cursor is updated when the scan runs on
// replicas, otherwise the primary cursor.
func (i *ClusterIterator) updateCursor(owner string, cursor uint64) {
	byOwner, ok := i.cursors[i.partID]
	if !ok {
		byOwner = make(map[string]*currentCursor)
		i.cursors[i.partID] = byOwner
	}

	cc, ok := byOwner[owner]
	if !ok {
		cc = &currentCursor{}
	}
	if i.config.Replica {
		cc.replica = cursor
	} else {
		cc.primary = cursor
	}
	byOwner[owner] = cc
}

// loadCursor returns the saved SCAN cursor for the given owner in the current
// partition. Zero means the scan starts (or restarts) from the beginning.
func (i *ClusterIterator) loadCursor(owner string) uint64 {
	// Indexing a nil inner map is safe in Go and yields the zero value, so a
	// missing partition entry and a missing owner entry are handled together.
	cc, ok := i.cursors[i.partID][owner]
	if !ok {
		return 0
	}
	if i.config.Replica {
		return cc.replica
	}
	return cc.primary
}

// updateIterator merges freshly scanned keys into the current page — skipping
// keys already seen for this partition — and persists the owner's new cursor.
func (i *ClusterIterator) updateIterator(keys []string, cursor uint64, owner string) {
	for _, k := range keys {
		if _, seen := i.partitionKeys[k]; seen {
			continue
		}
		i.partitionKeys[k] = struct{}{}
		i.page = append(i.page, k)
	}
	i.updateCursor(owner, cursor)
}

// getOwners returns a copy of the owner list (replica or primary, depending
// on the scan mode) for the current partition.
//
// It must hold routingTableMtx: per the field comment on ClusterIterator,
// that mutex protects routingTable, and fetchRoutingTablePeriodically
// replaces the table under the same lock from a background goroutine.
// The original read the map without the lock, which is a data race.
func (i *ClusterIterator) getOwners() []string {
	i.routingTableMtx.Lock()
	defer i.routingTableMtx.Unlock()

	var raw []string
	if i.config.Replica {
		raw = i.routingTable[i.partID].ReplicaOwners
	} else {
		raw = i.routingTable[i.partID].PrimaryOwners
	}
	// Return a safe copy so callers never alias the routing table's slices.
	owners := make([]string, len(raw))
	copy(owners, raw)
	return owners
}

// removeScannedOwner deletes the owner at index idx from the current route's
// owner list (replica or primary, depending on the scan mode). Out-of-range
// indexes are ignored.
//
// NOTE(review): idx refers to the owner list as the caller captured it.
// Removing an element shifts the indexes of the following owners, so callers
// that remove more than one owner must remove in descending index order.
func (i *ClusterIterator) removeScannedOwner(idx int) {
	if i.config.Replica {
		if len(i.route.ReplicaOwners) > 0 && len(i.route.ReplicaOwners) > idx {
			i.route.ReplicaOwners = append(i.route.ReplicaOwners[:idx], i.route.ReplicaOwners[idx+1:]...)
		}
	} else {
		if len(i.route.PrimaryOwners) > 0 && len(i.route.PrimaryOwners) > idx {
			i.route.PrimaryOwners = append(i.route.PrimaryOwners[:idx], i.route.PrimaryOwners[idx+1:]...)
		}
	}
}

// scanOnOwners runs one SCAN pass over every owner of the current partition,
// merging the returned keys into the page and persisting the per-owner
// cursors. Owners that report cursor 0 are exhausted and removed from the
// current route.
//
// Bug fix: the original called removeScannedOwner(idx) inside the loop.
// Removing an owner shifts the indexes of the remaining owners, so when two
// or more owners finished in the same pass the later removals deleted the
// wrong entries. Removal is now deferred and applied in descending index
// order after the loop.
func (i *ClusterIterator) scanOnOwners() error {
	owners := i.getOwners()

	// Indexes of owners that finished scanning during this pass.
	var exhausted []int

	for idx, owner := range owners {
		cursor := i.loadCursor(owner)

		// Build a scan command for this partition/owner pair.
		s := protocol.NewScan(i.partID, i.dm.Name(), cursor)
		if i.config.HasCount {
			s.SetCount(i.config.Count)
		}
		if i.config.HasMatch {
			s.SetMatch(i.config.Match)
		}
		if i.config.Replica {
			s.SetReplica()
		}

		scanCmd := s.Command(i.ctx)
		// Fetch a Redis client for the given owner.
		rc := i.clusterClient.client.Get(owner)
		if err := rc.Process(i.ctx, scanCmd); err != nil {
			return err
		}

		keys, newCursor, err := scanCmd.Result()
		if err != nil {
			return err
		}
		i.updateIterator(keys, newCursor, owner)
		if newCursor == 0 {
			exhausted = append(exhausted, idx)
		}
	}

	// Remove in descending index order so earlier removals cannot invalidate
	// the later indexes.
	for n := len(exhausted) - 1; n >= 0; n-- {
		i.removeScannedOwner(exhausted[n])
	}
	return nil
}

// resetPage rewinds the read position and discards any keys still buffered
// on the current page.
func (i *ClusterIterator) resetPage() {
	i.pos = 0
	if len(i.page) > 0 {
		i.page = []string{}
	}
}

// fetchData runs the configured scanner twice for the current partition:
// first against the primary owners, then against the replica owners.
func (i *ClusterIterator) fetchData() error {
	for _, replica := range []bool{false, true} {
		i.config.Replica = replica
		if err := i.scanner(); err != nil {
			return err
		}
	}
	return nil
}

// reset prepares the iterator for scanning a new partition: it rewinds the
// page, clears the per-partition deduplication set, and loads the new
// partition's route.
func (i *ClusterIterator) reset() {
	i.resetPage()
	i.partitionKeys = make(map[string]struct{})
	i.loadRoute()
}

// next advances the iterator by one key. It serves from the buffered page
// when possible; otherwise it keeps scanning the remaining owners of the
// current partition and, once the partition is exhausted, moves on to the
// next one. It returns false when every partition has been fully scanned or
// a scan fails. Callers must hold i.mtx.
func (i *ClusterIterator) next() bool {
	if len(i.page) != 0 {
		i.pos++
		// pos is 1-based; it is valid while it does not run past the page.
		if i.pos <= len(i.page) {
			return true
		}
	}

	i.resetPage()

	for {
		if err := i.fetchData(); err != nil {
			i.logger.Printf("[ERROR] Failed to fetch data: %s", err)
			return false
		}
		if len(i.page) != 0 {
			// We have data on the page to read. Stop the iteration.
			break
		}

		if len(i.route.PrimaryOwners) == 0 && len(i.route.ReplicaOwners) == 0 {
			// We completed scanning all the owners. Stop the iteration.
			break
		}
	}

	// Partition exhausted with nothing buffered: advance to the next
	// partition and retry, or stop when all partitions have been visited.
	if len(i.page) == 0 && len(i.route.PrimaryOwners) == 0 && len(i.route.ReplicaOwners) == 0 {
		i.partID++
		if i.partID >= i.partitionCount {
			return false
		}
		i.reset()
		return i.next()
	}
	i.pos = 1
	return true
}

// Next returns true if there is more key in the iterator implementation.
// Otherwise, it returns false
func (i *ClusterIterator) Next() bool {
	i.mtx.Lock()
	defer i.mtx.Unlock()

	// Stop immediately once the iterator has been closed; ctx.Err() is
	// non-nil exactly when ctx.Done() is closed.
	if i.ctx.Err() != nil {
		return false
	}
	return i.next()
}

// Key returns a key name from the distributed map.
func (i *ClusterIterator) Key() string {
	i.mtx.Lock()
	defer i.mtx.Unlock()

	// pos is 1-based and points at the key produced by the last Next call;
	// outside that range there is nothing to return.
	if i.pos < 1 || i.pos > len(i.page) {
		return ""
	}
	return i.page[i.pos-1]
}

// fetchRoutingTablePeriodically refreshes the cached routing table once per
// second until the iterator's context is canceled. It runs in its own
// goroutine; the owner tracks it with i.wg.
//
// Uses a single reusable time.Ticker instead of the original time.After,
// which allocated a fresh timer on every loop iteration.
func (i *ClusterIterator) fetchRoutingTablePeriodically() {
	defer i.wg.Done()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-i.ctx.Done():
			return
		case <-ticker.C:
			if err := i.fetchRoutingTable(); err != nil {
				i.logger.Printf("[ERROR] Failed to fetch the latest version of the routing table: %s", err)
			}
		}
	}
}

// fetchRoutingTable retrieves the cluster's current routing table and caches
// it, together with the partition count, under routingTableMtx.
func (i *ClusterIterator) fetchRoutingTable() error {
	freshTable, err := i.clusterClient.RoutingTable(i.ctx)
	if err != nil {
		return err
	}

	i.routingTableMtx.Lock()
	defer i.routingTableMtx.Unlock()

	i.routingTable = freshTable
	// The partition count is effectively a constant and always greater than zero.
	i.partitionCount = uint64(len(freshTable))
	return nil
}

// Close stops the iteration and releases allocated resources.
func (i *ClusterIterator) Close() {
	// Already closed; nothing to do. ctx.Err() is non-nil exactly when
	// ctx.Done() is closed.
	if i.ctx.Err() != nil {
		return
	}
	i.cancel()
	// Wait until the background routing-table updater has stopped.
	i.wg.Wait()
}


================================================
FILE: cluster_iterator_test.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"fmt"
	"testing"

	"github.com/olric-data/olric/internal/testutil"
	"github.com/stretchr/testify/require"
)

func TestClusterClient_ScanMatch(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	// Insert 100 keys: half prefixed with "even:", half with "odd:".
	evenKeys := make(map[string]bool)
	for n := 0; n < 100; n++ {
		var key string
		if n%2 == 0 {
			key = fmt.Sprintf("even:%s", testutil.ToKey(n))
			evenKeys[key] = false
		} else {
			key = fmt.Sprintf("odd:%s", testutil.ToKey(n))
		}
		require.NoError(t, dm.Put(ctx, key, n))
	}

	// Only keys matching the regular expression must be returned.
	it, err := dm.Scan(ctx, Match("^even:"))
	require.NoError(t, err)
	defer it.Close()

	var count int
	for it.Next() {
		count++
		require.Contains(t, evenKeys, it.Key())
	}
	require.Equal(t, 50, count)
}

func TestClusterClient_Scan(t *testing.T) {
	cl := newTestOlricCluster(t)
	db := cl.addMember(t)
	cl.addMember(t)

	ctx := context.Background()
	c, err := NewClusterClient([]string{db.name})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, c.Close(ctx))
	}()

	dm, err := c.NewDMap("mydmap")
	require.NoError(t, err)

	// Insert 100 keys and remember them for the membership checks below.
	allKeys := make(map[string]bool)
	for n := 0; n < 100; n++ {
		require.NoError(t, dm.Put(ctx, testutil.ToKey(n), n))
		allKeys[testutil.ToKey(n)] = false
	}

	it, err := dm.Scan(ctx)
	require.NoError(t, err)
	defer it.Close()

	// Every inserted key must be observed by the iterator.
	var count int
	for it.Next() {
		count++
		require.Contains(t, allKeys, it.Key())
	}
	require.Equal(t, 100, count)
}


================================================
FILE: cluster_test.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package olric

import (
	"context"
	"testing"

	"github.com/olric-data/olric/internal/protocol"
	"github.com/stretchr/testify/require"
)

func TestOlric_ClusterRoutingTable_clusterRoutingTableCommandHandler(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	// Issue the routing table command directly over the wire protocol.
	cmd := protocol.NewClusterRoutingTable().Command(db.ctx)
	conn := db.client.Get(db.rt.This().String())
	require.NoError(t, conn.Process(db.ctx, cmd))

	slice, err := cmd.Slice()
	require.NoError(t, err)

	rt, err := mapToRoutingTable(slice)
	require.NoError(t, err)
	require.Len(t, rt, int(db.config.PartitionCount))

	// A standalone member owns every partition and has no replicas.
	this := db.rt.This().String()
	for _, route := range rt {
		require.Len(t, route.PrimaryOwners, 1)
		require.Equal(t, this, route.PrimaryOwners[0])
		require.Len(t, route.ReplicaOwners, 0)
	}
}

func TestOlric_RoutingTable_Standalone(t *testing.T) {
	cluster := newTestOlricCluster(t)
	db := cluster.addMember(t)

	rt, err := db.routingTable(context.Background())
	require.NoError(t, err)
	require.Len(t, rt, int(db.config.PartitionCount))

	// On a standalone node this member is the sole primary owner of every
	// partition, and no replicas exist.
	this := db.rt.This().String()
	for _, route := range rt {
		require.Len(t, route.PrimaryOwners, 1)
		require.Equal(t, this, route.PrimaryOwners[0])
		require.Len(t, route.ReplicaOwners, 0)
	}
}


================================================
FILE: cmd/olric-server/main.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Server implementation for Olric. Olric Server basically manages configuration for you.

package main

import (
	"context"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/olric-data/olric"
	"github.com/olric-data/olric/cmd/olric-server/server"
	"github.com/olric-data/olric/config"
	"github.com/sean-/seed"
)

// usage writes the olric-server command line help text to stdout. It panics
// when the write fails, which only happens if stdout is unusable.
func usage() {
	var msg = `Usage: olric-server [options] ...

Distributed key-value store and cache

Options:
  -h, --help    Print this message and exit.
  -v, --version Print the version number and exit.
  -c, --config  Sets configuration file path. Default is olric-server-local.yaml in the
                current folder. Set OLRIC_SERVER_CONFIG to overwrite it.

The Go runtime version %s
Report bugs to https://github.com/olric-data/olric/issues
`
	if _, err := fmt.Fprintf(os.Stdout, msg, runtime.Version()); err != nil {
		panic(err)
	}
}

// arguments holds the parsed command-line flags of olric-server.
type arguments struct {
	config  string // path to the configuration file (-c/--config)
	help    bool   // print usage and exit (-h/--help)
	version bool   // print version information and exit (-v/--version)
}

const (
	// DefaultConfigFile is the default configuration file path on a Unix-based operating system.
	DefaultConfigFile = "olric-server-local.yaml"

	// EnvConfigFile is the name of environment variable which can be used to override default configuration file path.
	EnvConfigFile = "OLRIC_SERVER_CONFIG"
)

// main parses command line flags, loads the configuration, and runs the
// olric-server process until it stops or fails.
func main() {
	args := &arguments{}

	// Parse command line parameters. The flag set's own error output is
	// discarded because errors are reported explicitly below.
	f := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	f.SetOutput(ioutil.Discard)
	f.BoolVar(&args.help, "h", false, "")
	f.BoolVar(&args.help, "help", false, "")

	f.BoolVar(&args.version, "version", false, "")
	f.BoolVar(&args.version, "v", false, "")

	f.StringVar(&args.config, "config", DefaultConfigFile, "")
	f.StringVar(&args.config, "c", DefaultConfigFile, "")

	if err := f.Parse(os.Args[1:]); err != nil {
		// Pass err as a formatting argument instead of pre-rendering it with
		// Sprintf: using a formatted string as the format argument misprints
		// any '%' in the error text (this is what `go vet` flags).
		_, _ = fmt.Fprintf(os.Stderr, "parsing error: %v\n", err)
		usage()
		os.Exit(1)
	}

	if args.version {
		_, _ = fmt.Fprintf(os.Stderr, "olric-server version %s %s %s/%s\n",
			olric.ReleaseVersion,
			runtime.Version(),
			runtime.GOOS,
			runtime.GOARCH,
		)
		return
	} else if args.help {
		usage()
		return
	}

	// MustInit provides guaranteed secure seeding.  If `/dev/urandom` is not
	// available, MustInit will panic() with an error indicating why reading from
	// `/dev/urandom` failed.  MustInit() will upgrade the seed if for some reason a
	// call to Init() failed in the past.
	seed.MustInit()

	// The environment variable takes precedence over -c/--config.
	envPath := os.Getenv(EnvConfigFile)
	if envPath != "" {
		args.config = envPath
	}

	c, err := config.Load(args.config)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "failed to load the configuration file: %s: %v\n", args.config, err)
		os.Exit(1)
	}

	s, err := server.New(c)
	if err != nil {
		c.Logger.Fatalf("[ERROR] Failed to create a new Olric instance: %v", err)
	}

	if err = s.Start(); err != nil {
		c.Logger.Printf("[ERROR] Failed to start Olric: %v", err)

		// Best-effort graceful shutdown before exiting with a failure status.
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		if err := s.Shutdown(ctx); err != nil {
			c.Logger.Printf("[ERROR] Failed to shutdown Olric: %v", err)
		}
		c.Logger.Fatal("[ERROR] Quit unexpectedly!")
	}

	c.Logger.Print("[INFO] Quit!")
}


================================================
FILE: cmd/olric-server/olric-server-local.yaml
================================================
#
# IMPORTANT NOTE: This configuration file is intended for testing and local development.
#
server:
  # BindAddr denotes the address that Olric will bind to for communication
  # with other Olric nodes.
  bindAddr: localhost

  # BindPort denotes the port that Olric will bind to for communication
  # with other Olric nodes.
  bindPort: 3320

  # KeepAlivePeriod denotes whether the operating system should send
  # keep-alive messages on the connection.
  keepAlivePeriod: 300s

  # IdleClose will automatically close idle connections after the specified duration.
  # Use zero to disable this feature.
  # idleClose: 300s

  # Timeout for bootstrap control
  #
  # An Olric node checks operation status before taking any action for the
  # cluster events, responding incoming requests and running API functions.
  # Bootstrapping status is one of the most important checkpoints for an
  # "operable" Olric node. BootstrapTimeout sets a deadline to check
  # bootstrapping status without blocking indefinitely.
  bootstrapTimeout: 5s

  # PartitionCount is 271, by default.
  partitionCount: 271

  # ReplicaCount is 1, by default.
  replicaCount: 1

  # Minimum number of successful writes to return a response for a write request.
  writeQuorum: 1

  # Minimum number of successful reads to return a response for a read request.
  readQuorum: 1

  # Switch to control read-repair algorithm which helps to reduce entropy.
  readRepair: false

  # Default value is SyncReplicationMode.
  replicationMode: 0 # sync mode. for async, set 1

  # Minimum number of members to form a cluster and run any query on the cluster.
  memberCountQuorum: 1

  # Coordinator member pushes the routing table to cluster members in the case of
  # node join or left events. It also pushes the table periodically. routingTablePushInterval
  # is the interval between subsequent calls. Default is 1 minute.
  routingTablePushInterval: 1m

  # Olric can send push cluster events to cluster.events channel. Available cluster events:
  #
  # * node-join-event
  # * node-left-event
  # * fragment-migration-event
  # * fragment-received-event
  #
  # If you want to receive these events, set true to EnableClusterEventsChannel and subscribe to
  # cluster.events channel. Default is false.
  enableClusterEventsChannel: true

#authentication:
  #password: "your-password"
  
client:
  # Timeout for TCP dial.
  #
  # The timeout includes name resolution, if required. When using TCP, and the host in the address parameter
  # resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is
  # given an appropriate fraction of the time to connect.
  dialTimeout: 5s

  # Timeout for socket reads. If reached, commands will fail
  # with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
  # Default is DefaultReadTimeout
  readTimeout: 3s

  # Timeout for socket writes. If reached, commands will fail
  # with a timeout instead of blocking.
  # Default is DefaultWriteTimeout
  writeTimeout: 3s

  # Maximum number of retries before giving up.
  # Default is 3 retries; -1 (not 0) disables retries.
  #maxRetries: 3

  # Minimum backoff between each retry.
  # Default is 8 milliseconds; -1 disables backoff.
  #minRetryBackoff: 8ms

  # Maximum backoff between each retry.
  # Default is 512 milliseconds; -1 disables backoff.
  #maxRetryBackoff: 512ms

  # Type of connection pool.
  # true for FIFO pool, false for LIFO pool.
  # Note that fifo has higher overhead compared to lifo.
  #poolFIFO: false

  # Maximum number of socket connections.
  # Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
  #poolSize: 0

  # Minimum number of idle connections which is useful when establishing
  # new connection is slow.
  #minIdleConns:

  # Connection age at which client retires (closes) the connection.
  # Default is to not close aged connections.
  #maxConnAge:

  # Amount of time client waits for connection if all connections are busy before
  # returning an error. Default is ReadTimeout + 1 second.
  #poolTimeout: 3s

  # Amount of time after which client closes idle connections.
  # Should be less than server's timeout.
  # Default is 5 minutes. -1 disables idle timeout check.
  #idleTimeout: 5m

  # Frequency of idle checks made by idle connections reaper.
  # Default is 1 minute. -1 disables idle connections reaper,
  # but idle connections are still discarded by the client
  # if IdleTimeout is set.
  #idleCheckFrequency: 1m


logging:
  # DefaultLogVerbosity denotes default log verbosity level.
  #
  # * 1 - Generally useful for this to ALWAYS be visible to an operator
  #   * Programmer errors
  #   * Logging extra info about a panic
  #   * CLI argument handling
  # * 2 - A reasonable default log level if you don't want verbosity.
  #   * Information about config (listening on X, watching Y)
  #   * Errors that repeat frequently that relate to conditions that can be
  #     corrected
  # * 3 - Useful steady state information about the service and
  #     important log messages that may correlate to
  #   significant changes in the system.  This is the recommended default log
  #     level for most systems.
  #   * Logging HTTP requests and their exit code
  #   * System state changing
  #   * Controller state change events
  #   * Scheduler log messages
  # * 4 - Extended information about changes
  #   * More info about system state changes
  # * 5 - Debug level verbosity
  #   * Logging in particularly thorny parts of code where you may want to come
  #     back later and check it
  # * 6 - Trace level verbosity
  #   * Context to understand the steps leading up to errors and warnings
  #   * More information for troubleshooting reported issues
  verbosity: 3

  # Default LogLevel is DEBUG. Available levels: "DEBUG", "WARN", "ERROR", "INFO"
  level: WARN
  output: stderr

memberlist:
  environment: local

  # Configuration related to what address to bind to and ports to
  # listen on. The port is used for both UDP and TCP gossip. It is
  # assumed other nodes are running on this port, but they do not need
  # to.
  bindAddr: localhost
  bindPort: 3322

  # EnableCompression is used to control message compression. This can
  # be used to reduce bandwidth usage at the cost of slightly more CPU
  # utilization. This is only available starting at protocol version 1.
  enableCompression: false

  # JoinRetryInterval is the time gap between attempts to join an existing
  # cluster.
  joinRetryInterval: 1ms

  # MaxJoinAttempts denotes the maximum number of attempts to join an existing
  # cluster before forming a new one.
  maxJoinAttempts: 1

  # See service discovery plugins
  #peers:
  #  - "localhost:3325"

  #advertiseAddr: ""
  #advertisePort: 3322
  #suspicionMaxTimeoutMult: 6
  #disableTCPPings: false
  #awarenessMaxMultiplier: 8
  #gossipNodes: 3
  #gossipVerifyIncoming: true
  #gossipVerifyOutgoing: true
  #dnsConfigPath: "/etc/resolv.conf"
  #handoffQueueDepth: 1024
  #udpBufferSize: 1400

dmaps:
  engine:
    name: ramblock
    config:
      tableSize: 524288 # bytes
#  checkEmptyFragmentsInterval: 1m
#  triggerCompactionInterval: 10m
#  numEvictionWorkers: 1
#  maxIdleDuration: ""
#  ttlDuration: "100s"
#  maxKeys: 100000
#  maxInuse: 1000000
#  lRUSamples: 10
#  evictionPolicy: "LRU"
#  custom:
#   foobar:
#      maxIdleDuration: "60s"
#      ttlDuration: "300s"
#      maxKeys: 500000
#      lRUSamples: 20
#      evictionPolicy: "NONE"


#serviceDiscovery:
#  # path is a required property and used by Olric. It has to be a full path.
#  path: "/home/burak/go/src/github.com/olric-data/olric-consul-plugin/consul.so"
#
#  # provider is just informal,
#  provider: "consul"
#
#  # Plugin specific configuration
#  # Consul server, used by the plugin. It's required
#  address: "http://127.0.0.1:8500"
#
#  # Specifies that the server should return only nodes with all checks in the passing state.
#  passingOnly: true
#
#  # Missing health checks from the request will be deleted from the agent. Using this parameter
#  # allows to idempotently register a service and its checks without having to manually deregister
#  # checks.
#  replaceExistingChecks: true
#
#  # InsecureSkipVerify controls whether a client verifies the
#  # server's certificate chain and host name.
#  # If InsecureSkipVerify is true, TLS accepts any certificate
#  # presented by the server and any host name in that certificate.
#  # In this mode, TLS is susceptible to man-in-the-middle attacks.
#  # This should be used only for testing.
#  insecureSkipVerify: true
#
#  # service record
#  payload: '
#      {
#          "Name": "olric-cluster",
#          "ID": "olric-node-1",
#          "Tags": [
#            "primary",
#            "v1"
#          ],
#          "Address": "localhost",
#          "Port": 3322,
#          "EnableTagOverride": false,
#          "check": {
#            "name": "Olric node on 3322",
#            "tcp": "0.0.0.0:3322",
#            "interval": "10s",
#            "timeout": "1s"
#          }
#      }
#'
#
#
#serviceDiscovery:
#  provider: "k8s"
#  path: "/Users/buraksezer/go/src/github.com/olric-data/olric-cloud-plugin/olric-cloud-plugin.so"
#  args: 'label_selector="app = olric-server"'


================================================
FILE: cmd/olric-server/server/server.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*Package server provides a standalone server implementation for Olric*/
package server

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/olric-data/olric"
	"github.com/olric-data/olric/config"
	"golang.org/x/sync/errgroup"
)

// OlricServer represents an instance of the Olric distributed in-memory data structure store.
// It encapsulates logging, configuration, the Olric database instance, and an error group for
// concurrency management.
// OlricServer represents an instance of the Olric distributed in-memory data structure store.
// It encapsulates logging, configuration, the Olric database instance, and an error group for
// concurrency management.
type OlricServer struct {
	log    *log.Logger    // destination for server log messages, taken from the config
	config *config.Config // full server configuration
	db     *olric.Olric   // the embedded Olric node
	errGr  errgroup.Group // tracks the serving and shutdown goroutines
}

// New initializes a new OlricServer instance using the provided configuration and returns it or an error.
func New(c *config.Config) (*OlricServer, error) {
	db, err := olric.New(c)
	if err != nil {
		return nil, err
	}
	srv := &OlricServer{
		log:    c.Logger,
		config: c,
		db:     db,
	}
	return srv, nil
}

// waitForInterrupt waits for termination signals (SIGTERM, SIGINT) to gracefully shut down the Olric server instance.
// A second SIGTERM/SIGINT received while the graceful shutdown is still in
// progress terminates the process immediately with exit code 1.
func (s *OlricServer) waitForInterrupt() {
	shutDownChan := make(chan os.Signal, 1)
	signal.Notify(shutDownChan, syscall.SIGTERM, syscall.SIGINT)
	ch := <-shutDownChan
	// Fixed typo ("catched"); now consistent with the force-quit message below.
	s.log.Printf("[INFO] Signal caught: %s", ch.String())

	// Awaits for shutdown
	s.errGr.Go(func() error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		if err := s.db.Shutdown(ctx); err != nil {
			s.log.Printf("[ERROR] Failed to shutdown Olric: %v", err)
			return err
		}

		return nil
	})

	// This is not a goroutine leak. The process will quit.
	go func() {
		s.log.Printf("[INFO] Awaiting for background tasks")
		s.log.Printf("[INFO] Press CTRL+C or send SIGTERM/SIGINT to quit immediately")

		forceQuitCh := make(chan os.Signal, 1)
		signal.Notify(forceQuitCh, syscall.SIGTERM, syscall.SIGINT)
		ch := <-forceQuitCh

		s.log.Printf("[INFO] Signal caught: %s", ch.String())
		s.log.Printf("[INFO] Quits with exit code 1")
		os.Exit(1)
	}()
}

// Start launches the Olric server instance and begins listening for incoming requests and termination signals.
func (s *OlricServer) Start() error {
	s.log.Printf("[INFO] pid: %d has been started", os.Getpid())

	// Watch for SIGTERM/SIGINT in the background.
	go s.waitForInterrupt()

	// Run the node inside the error group and block until it returns.
	s.errGr.Go(s.db.Start)
	return s.errGr.Wait()
}

// Shutdown gracefully stops the Olric server instance, releasing resources and ensuring a clean termination.
// It delegates directly to the underlying *olric.Olric instance; the given
// context bounds how long the shutdown may take.
func (s *OlricServer) Shutdown(ctx context.Context) error {
	return s.db.Shutdown(ctx)
}


================================================
FILE: config/authentication.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import "strings"

// Authentication holds the authentication settings. An empty Password
// disables authentication entirely (see Enabled).
type Authentication struct {
	Password string // plain-text password; whitespace is trimmed by Sanitize
}

// Sanitize normalizes the Authentication configuration before use: it strips
// leading and trailing whitespace from the password. (The previous comment
// claimed no changes were applied, which was stale.)
func (a *Authentication) Sanitize() error {
	a.Password = strings.TrimSpace(a.Password)
	return nil
}

// Validate checks the current Authentication configuration for validity and returns an error if issues are found.
// There are currently no constraints to enforce, so it always returns nil.
func (a *Authentication) Validate() error {
	// Nothing to do
	return nil
}

// Enabled reports whether authentication is configured, i.e. a non-empty
// password has been set.
func (a *Authentication) Enabled() bool {
	return a.Password != ""
}

// Interface guard: Authentication must satisfy IConfig at compile time.
var _ IConfig = (*Authentication)(nil)


================================================
FILE: config/client.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"runtime"
	"time"

	"github.com/redis/go-redis/v9"
)

// Default values applied to Client fields that are left at their zero value
// (see Client.Sanitize); DefaultKeepalive is used by the default Dialer.
const (
	DefaultDialTimeout     = 5 * time.Second
	DefaultKeepalive       = 5 * time.Minute
	DefaultReadTimeout     = 3 * time.Second
	DefaultIdleTimeout     = 5 * time.Minute
	DefaultMinRetryBackoff = 8 * time.Millisecond
	DefaultMaxRetryBackoff = 512 * time.Millisecond
	DefaultMaxRetries      = 3
)

// Client denotes configuration for TCP clients in Olric and the official Golang client.
type Client struct {
	// Authentication holds the credentials sent when the server requires a password.
	Authentication *Authentication

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration

	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout time.Duration

	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking.
	// Default is ReadTimeout.
	WriteTimeout time.Duration

	// Dialer creates new network connection and has priority over
	// Network and Addr options.
	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)

	// Hook that is called when new connection is established.
	OnConnect func(ctx context.Context, cn *redis.Conn) error

	// Maximum number of retries before giving up.
	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int

	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff time.Duration

	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff time.Duration

	// Type of connection pool.
	// true for FIFO pool, false for LIFO pool.
	// Note that fifo has higher overhead compared to lifo.
	PoolFIFO bool

	// Maximum number of socket connections.
	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
	PoolSize int

	// Minimum number of idle connections which is useful when establishing
	// new connection is slow.
	MinIdleConns int

	// Connection age at which client retires (closes) the connection.
	// Default is to not close aged connections.
	MaxConnAge time.Duration

	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout time.Duration

	// Amount of time after which client closes idle connections.
	// Should be less than server's timeout.
	// Default is 5 minutes. -1 disables idle timeout check.
	IdleTimeout time.Duration

	// TLS Config to use. When set TLS will be negotiated.
	TLSConfig *tls.Config

	// Limiter interface used to implemented circuit breaker or rate limiter.
	Limiter redis.Limiter
}

// NewClient returns a new configuration object for clients.
func NewClient() *Client {
	c := &Client{Authentication: &Authentication{}}
	// Sanitize cannot fail today, but a failure here means the defaults are
	// broken — a programming error, hence the panic.
	if err := c.Sanitize(); err != nil {
		panic(fmt.Sprintf("failed to create a new client configuration: %v", err))
	}
	return c
}

// Sanitize sets default values to empty configuration variables, if it's possible.
//
// Convention for the timeout/retry fields: 0 means "use the default" and -1
// means "explicitly disabled", which is normalized to 0 for go-redis.
func (c *Client) Sanitize() error {
	if err := c.Authentication.Sanitize(); err != nil {
		return fmt.Errorf("failed to sanitize authentication configuration: %w", err)
	}

	if c.DialTimeout == 0 {
		c.DialTimeout = DefaultDialTimeout
	}
	if c.Dialer == nil {
		// Default dialer honors DialTimeout and negotiates TLS when
		// TLSConfig is set.
		c.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
			netDialer := &net.Dialer{
				Timeout:   c.DialTimeout,
				KeepAlive: DefaultKeepalive,
			}
			if c.TLSConfig == nil {
				return netDialer.DialContext(ctx, network, addr)
			}
			return tls.DialWithDialer(netDialer, network, addr, c.TLSConfig)
		}
	}
	if c.PoolSize == 0 {
		c.PoolSize = 10 * runtime.GOMAXPROCS(0)
	}
	switch c.ReadTimeout {
	case -1:
		c.ReadTimeout = 0
	case 0:
		c.ReadTimeout = DefaultReadTimeout
	}
	switch c.WriteTimeout {
	case -1:
		c.WriteTimeout = 0
	case 0:
		c.WriteTimeout = c.ReadTimeout
	}
	if c.PoolTimeout == 0 {
		c.PoolTimeout = c.ReadTimeout + time.Second
	}
	if c.IdleTimeout == 0 {
		c.IdleTimeout = DefaultIdleTimeout
	}

	// Same switch style as the other -1/0 sentinel fields; the original used
	// an inconsistent if/else-if chain here.
	switch c.MaxRetries {
	case -1:
		c.MaxRetries = 0
	case 0:
		c.MaxRetries = DefaultMaxRetries
	}
	switch c.MinRetryBackoff {
	case -1:
		c.MinRetryBackoff = 0
	case 0:
		c.MinRetryBackoff = DefaultMinRetryBackoff
	}
	switch c.MaxRetryBackoff {
	case -1:
		c.MaxRetryBackoff = 0
	case 0:
		c.MaxRetryBackoff = DefaultMaxRetryBackoff
	}

	return nil
}

// Validate reports the first error found in the client configuration.
// Currently only the authentication section requires validation.
func (c *Client) Validate() error {
	err := c.Authentication.Validate()
	if err != nil {
		return fmt.Errorf("failed to validate authentication configuration: %w", err)
	}
	return nil
}

// RedisOptions translates this client configuration into a go-redis
// options struct.
//
// IdleCheckFrequency is intentionally absent: go-redis no longer checks
// idle connections. See https://github.com/redis/go-redis/discussions/2635
func (c *Client) RedisOptions() *redis.Options {
	opts := &redis.Options{
		Network:         "tcp",
		Dialer:          c.Dialer,
		OnConnect:       c.OnConnect,
		MaxRetries:      c.MaxRetries,
		MinRetryBackoff: c.MinRetryBackoff,
		MaxRetryBackoff: c.MaxRetryBackoff,
		DialTimeout:     c.DialTimeout,
		ReadTimeout:     c.ReadTimeout,
		WriteTimeout:    c.WriteTimeout,
		PoolFIFO:        c.PoolFIFO,
		PoolSize:        c.PoolSize,
		MinIdleConns:    c.MinIdleConns,
		ConnMaxLifetime: c.MaxConnAge,
		PoolTimeout:     c.PoolTimeout,
		ConnMaxIdleTime: c.IdleTimeout,
		TLSConfig:       c.TLSConfig,
		Limiter:         c.Limiter,
	}
	// The password is only forwarded when authentication is enabled.
	if c.Authentication.Enabled() {
		opts.Password = c.Authentication.Password
	}
	return opts
}

// Interface guard
var _ IConfig = (*Client)(nil)


================================================
FILE: config/config.go
================================================
// Copyright 2018-2025 The Olric Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"strconv"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/olric-data/olric/hasher"
)

// IConfig is an interface that has to be implemented by Config and its nested
// structs. It provides a clear and granular way to sanitize and validate
// the configuration. Implementations are expected to call Sanitize before
// Validate, since Validate assumes defaults have already been applied.
type IConfig interface {
	// Sanitize sets default values for any unset configuration fields.
	Sanitize() error

	// Validate returns an error describing the first configuration problem
	// found, or nil if the configuration is usable.
	Validate() error
}

const (
	// SyncReplicationMode enables sync replication mode which means that the
	// caller is blocked until the write/delete operation is applied by replica
	// owners. The default mode is SyncReplicationMode.
	SyncReplicationMode = 0

	// AsyncReplicationMode enables async replication mode which means that
	// write/delete operations are done in a background task.
	AsyncReplicationMode = 1
)

// Supported log levels. See Config.LogLevel; DefaultLogLevel is LogLevelDebug.
const (
	LogLevelDebug = "DEBUG"
	LogLevelWarn  = "WARN"
	LogLevelError = "ERROR"
	LogLevelInfo  = "INFO"
)

const (
	// DefaultPort is for Olric
	DefaultPort = 3320

	// DefaultDiscoveryPort is for memberlist
	DefaultDiscoveryPort = 3322

	// DefaultPartitionCount denotes default partition count in the cluster.
	DefaultPartitionCount = 271

	// DefaultLoadFactor is used by the consistent hashing function. Keep it small.
	DefaultLoadFactor = 1.25

	// DefaultLogLevel determines the log level without extra configuration.
	// It's DEBUG.
	DefaultLogLevel = LogLevelDebug

	// DefaultLogVerbosity denotes default log verbosity level.
	//
	// * flog.V(1) - Generally useful for this to ALWAYS be visible to an operator
	//   * Programmer errors
	//   * Logging extra info about a panic
	//   * CLI argument handling
	// * flog.V(2) - A reasonable default log level if you don't want verbosity.
	//   * Information about config (listening on X, watching Y)
	//   * Errors that repeat frequently that relate to conditions that can be
	//     corrected (pod detected as unhealthy)
	// * flog.V(3) - Useful steady state information about the service and
	//     important log messages that may correlate to
	//   significant changes in the system.  This is the recommended default log
	//     level for most systems.
	//   * Logging HTTP requests and their exit code
	//   * System state changing (killing pod)
	//   * Controller state change events (starting pods)
	//   * Scheduler log messages
	// * flog.V(4) - Extended information about changes
	//   * More info about system state changes
	// * flog.V(5) - Debug level verbosity
	//   * Logging in particularly thorny parts of code where you may want to come
	//     back later and check it
	// * flog.V(6) - Trace level verbosity
	//   * Context to understand the steps leading up to neterrors and warnings
	//   * More information for troubleshooting reported issues
	DefaultLogVerbosity = 3

	// MinimumReplicaCount denotes default and minimum replica count in an Olric
	// cluster.
	MinimumReplicaCount = 1

	// DefaultBootstrapTimeout denotes default timeout value to check bootstrapping
	// status.
	DefaultBootstrapTimeout = 10 * time.Second

	// DefaultJoinRetryInterval denotes a time gap between sequential join attempts.
	DefaultJoinRetryInterval = time.Second

	// DefaultMaxJoinAttempts denotes a maximum number of failed join attempts
	// before forming a standalone cluster.
	DefaultMaxJoinAttempts = 10

	// MinimumMemberCountQuorum denotes minimum required count of members to form
	// a cluster.
	MinimumMemberCountQuorum = 1

	// DefaultLRUSamples is a sane default for randomly selected keys
	// in approximate LRU implementation. It's 5.
	DefaultLRUSamples int = 5

	// LRUEviction assigns this as EvictionPolicy in order to enable LRU eviction
	// algorithm.
	LRUEviction EvictionPolicy = "LRU"

	// DefaultStorageEngine denotes the storage engine implementation provided by
	// Olric project.
	DefaultStorageEngine = "ramblock"

	// DefaultRoutingTablePushInterval is interval between routing table push events.
	DefaultRoutingTablePushInterval = time.Minute

	// DefaultTriggerBalancerInterval is interval between two sequential call of balancer worker.
	DefaultTriggerBalancerInterval = 15 * time.Second

	// DefaultCheckEmptyFragmentsInterval is the default value of interval between
	// two sequential call of empty fragment cleaner. It's one minute by default.
	DefaultCheckEmptyFragmentsInterval = time.Minute

	// DefaultTriggerCompactionInterval is the default value of interval between
	// two sequential call of compaction workers. The compaction worker works until
	// its work is done. It's 10 minutes by default.
	DefaultTriggerCompactionInterval = 10 * time.Minute

	// DefaultLeaveTimeout is the default value of the maximum amount of time to
	// wait for a leave broadcast to be acknowledged before giving up. See
	// Config.LeaveTimeout. It's 5 seconds by default.
	DefaultLeaveTimeout = 5 * time.Second

	// Default quorum values for reads, writes and cluster membership.
	DefaultReadQuorum        = 1
	DefaultWriteQuorum       = 1
	DefaultMemberCountQuorum = 1

	// DefaultKeepAlivePeriod is the default value of TCP keepalive. It's 300 seconds.
	// This option is useful in order to detect dead peers (clients that cannot
	// be reached even if they look connected). Moreover, if there is network
	// equipment between clients and servers that need to see some traffic in
	// order to take the connection open, the option will prevent unexpected
	// connection closed events.
	DefaultKeepAlivePeriod = 300 * time.Second
)

// Config represents the configuration structure for customizing the behavior and properties of Olric.
type Config struct {
	// Authentication defines authentication settings, including password protection, for securing access.
	Authentication *Authentication

	// Interface denotes a binding interface. It can be used instead of BindAddr
	// if the interface is known but not the address. If both are provided, then
	// Olric verifies that the interface has the bind address that is provided.
	Interface string

	// LogVerbosity denotes the level of message verbosity. The default value
	// is 3. Valid values are between 1 to 6.
	LogVerbosity int32

	// Default LogLevel is DEBUG. Available levels: "DEBUG", "WARN", "ERROR", "INFO"
	LogLevel string

	// BindAddr denotes the address that Olric will bind to for communication
	// with other Olric nodes.
	BindAddr string

	// BindPort denotes the port that Olric will bind to for communication
	// with other Olric nodes.
	BindPort int

	// Client denotes configuration for TCP clients in Olric and the official
	// Golang client.
	Client *Client

	// KeepAlivePeriod denotes whether the operating system should send
	// keep-alive messages on the connection.
	KeepAlivePeriod time.Duration

	// IdleClose will automatically close idle connections after the specified duration.
	// Use zero to disable this feature.
	IdleClose time.Duration

	// Timeout for bootstrap control
	//
	// An Olric node checks operation status before taking any action for the
	// cluster events, responding incoming requests and running API functions.
	// Bootstrapping status is one of the most important checkpoints for an
	// "operable" Olric node. BootstrapTimeout sets a deadline to check
	// bootstrapping status without blocking indefinitely.
	BootstrapTimeout time.Duration

	// Coordinator member pushes the routing table to cluster members in the case of
	// node join or left events. It also pushes the table periodically. RoutingTablePushInterval
	// is the interval between subsequent calls. Default is 1 minute.
	RoutingTablePushInterval time.Duration

	// TriggerBalancerInterval is interval between two sequential call of balancer worker.
	TriggerBalancerInterval time.Duration

	// The list of host:port which are used by memberlist for discovery.
	// Don't confuse it with Name.
	Peers []string

	// PartitionCount is 271, by default.
	PartitionCount uint64

	// ReplicaCount is 1, by default.
	ReplicaCount int

	// Minimum number of successful reads to return a response for a read request.
	ReadQuorum int

	// Minimum number of successful writes to return a response for a write request.
	WriteQuorum int

	// Minimum number of members to form a cluster and run any query on the cluster.
	MemberCountQuorum int32

	// Switch to control read-repair algorithm which helps to reduce entropy.
	ReadRepair bool

	// Default value is SyncReplicationMode.
	ReplicationMode int

	// LoadFactor is used by consistent hashing function. It determines the maximum
	// load for a server in the cluster. Keep it small.
	LoadFactor float64

	// Olric can send push cluster events to cluster.events channel. Available cluster events:
	//
	// * node-join-event
	// * node-left-event
	// * fragment-migration-event
	// * fragment-received-event
	//
	// If you want to receive these events, set true to EnableClusterEventsChannel and subscribe to
	// cluster.events channel. Default is false.
	EnableClusterEventsChannel bool

	// Default hasher is github.com/cespare/xxhash/v2
	Hasher hasher.Hasher

	// LogOutput is the writer where logs should be sent when no custom logger
	// is provided. If unset, stderr is used by default.
	// If Logger is set, LogOutput is ignored.
	LogOutput io.Writer

	// Logger is a user-provided custom logger. When this is set, Olric will use
	// it as-is and will not inspect or modify LogOutput.
	Logger *log.Logger

	// DMaps denotes a global configuration for DMaps. You can still overwrite it
	// by setting a DMap for a particular distributed map via DMaps.Custom field.
	// Most of the fields are related with distributed cache implementation.
	DMaps *DMaps

	// JoinRetryInterval is the time gap between attempts to join an existing
	// cluster.
	JoinRetryInterval time.Duration

	// MaxJoinAttempts denotes the maximum number of attempts to join an existing
	// cluster before forming a new one.
	MaxJoinAttempts int

	// Callback function. Olric calls this after
	// the server is ready to accept new connections.
	Started func()

	// ServiceDiscovery is a map that contains plugins implement ServiceDiscovery
	// interface. See pkg/service_discovery/service_discovery.go for details.
	ServiceDiscovery map[string]interface{}

	// MemberlistInterface denotes a binding interface for memberlist. It can be
	// used instead of memberlist.Config.BindAddr if the interface is known but
	// not the address. If both are provided, then Olric verifies that the
	// interface has the bind address that is provided.
	MemberlistInterface string

	// LeaveTimeout bounds how long a node waits for its leave message to be
	// broadcast when leaving the cluster.
	//
	// Olric will broadcast a leave message but will not shut down the background
	// listeners, meaning the node will continue participating in gossip and state
	// updates.
	//
	// Sending a leave message will block until the leave message is successfully
	// broadcast to a member of the cluster, if any exist or until a specified timeout
	// is reached.
	LeaveTimeout time.Duration

	// MemberlistConfig is the memberlist configuration that Olric will
	// use to do the underlying membership management and gossip. Some
	// fields in the MemberlistConfig will be overwritten by Olric no
	// matter what:
	//
	//   * Name - This will always be set to the same as the NodeName
	//     in this configuration.
	//
	//   * ClusterEvents - Olric uses a custom event delegate.
	//
	//   * Delegate - Olric uses a custom delegate.
	//
	// You have to use NewMemberlistConfig to create a new one.
	// Then, you may need to modify it to tune for your environment.
	MemberlistConfig *memberlist.Config
}

// Validate finds errors in the current configuration. It assumes Sanitize
// has already been called, so required sub-configurations (Client, DMaps,
// Authentication, MemberlistConfig) are expected to be non-nil.
func (c *Config) Validate() error {
	if c.ReplicaCount < MinimumReplicaCount {
		return fmt.Errorf("cannot specify ReplicaCount smaller than MinimumReplicaCount")
	}

	// Quorums must be positive and cannot exceed the replica count, otherwise
	// no read/write could ever succeed.
	if c.ReadQuorum <= 0 {
		return fmt.Errorf("cannot specify ReadQuorum less than or equal to zero")
	}
	if c.ReplicaCount < c.ReadQuorum {
		return fmt.Errorf("cannot specify ReadQuorum greater than ReplicaCount")
	}

	if c.WriteQuorum <= 0 {
		return fmt.Errorf("cannot specify WriteQuorum less than or equal to zero")
	}
	if c.ReplicaCount < c.WriteQuorum {
		return fmt.Errorf("cannot specify WriteQuorum greater than ReplicaCount")
	}

	if err := c.validateMemberlistConfig(); err != nil {
		return err
	}

	if c.MemberCountQuorum < MinimumMemberCountQuorum {
		return fmt.Errorf("cannot specify MemberCountQuorum smaller than MinimumMemberCountQuorum")
	}

	if c.BindAddr == "" {
		return fmt.Errorf("bindAddr cannot be empty")
	}

	if c.BindPort == 0 {
		return fmt.Errorf("bindPort cannot be empty or zero")
	}

	// Check peers. If the Peers slice contains the node itself, return an error.
	port := strconv.Itoa(c.MemberlistConfig.BindPort)
	this := net.JoinHostPort(c.MemberlistConfig.BindAddr, port)
	for _, peer := range c.Peers {
		if this == peer {
			return fmt.Errorf("cannot be peer with itself")
		}
	}

	if err := c.Client.Validate(); err != nil {
		return fmt.Errorf("failed to validate client configuration: %w", err)
	}

	if err := c.DMaps.Validate(); err != nil {
		return fmt.Errorf("failed to validate DMap configuration: %w", err)
	}

	if err := c.Authentication.Validate(); err != nil {
		// Fixed: this message previously said "sanitize", misattributing the
		// failing step; this is Validate, not Sanitize.
		return fmt.Errorf("failed to validate authentication configuration: %w", err)
	}

	switch c.LogLevel {
	case LogLevelDebug, LogLevelWarn, LogLevelInfo, LogLevelError:
	default:
		return fmt.Errorf("invalid LogLevel: %s", c.LogLevel)
	}

	return nil
}

// Sanitize fills in any unset configuration fields with sensible defaults
// and sanitizes the nested Authentication, Client and DMaps configurations.
func (c *Config) Sanitize() error {
	// Logging defaults.
	if c.LogOutput == nil {
		c.LogOutput = os.Stderr
	}
	if c.LogLevel == "" {
		c.LogLevel = DefaultLogLevel
	}
	if c.LogVerbosity <= 0 {
		c.LogVerbosity = DefaultLogVerbosity
	}
	if c.Logger == nil {
		c.Logger = log.New(c.LogOutput, "", log.LstdFlags)
	}

	if c.Hasher == nil {
		c.Hasher = hasher.NewDefaultHasher()
	}

	// Network defaults: the kernel-reported hostname stands in for a missing
	// bind address.
	if c.BindAddr == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return fmt.Errorf("failed to read hostname from kernel: %w", err)
		}
		c.BindAddr = hostname
	}
	// We currently don't support ephemeral port selection. Because it needs
	// improved flow control in server initialization stage.
	if c.BindPort == 0 {
		c.BindPort = DefaultPort
	}

	// Partitioning and replication defaults.
	if c.LoadFactor == 0 {
		c.LoadFactor = DefaultLoadFactor
	}
	if c.PartitionCount == 0 {
		c.PartitionCount = DefaultPartitionCount
	}
	if c.ReplicaCount == 0 {
		c.ReplicaCount = MinimumReplicaCount
	}
	if c.ReadQuorum == 0 {
		c.ReadQuorum = DefaultReadQuorum
	}
	if c.WriteQuorum == 0 {
		c.WriteQuorum = DefaultWriteQuorum
	}
	if c.MemberCountQuorum == 0 {
		c.MemberCountQuorum = DefaultMemberCountQuorum
	}

	if c.MemberlistConfig == nil {
		mlCfg := memberlist.DefaultLocalConfig()
		// hostname is assigned to memberlist.BindAddr
		// memberlist.Name is assigned by olric.New
		mlCfg.BindPort = DefaultDiscoveryPort
		mlCfg.AdvertisePort = DefaultDiscoveryPort
		c.MemberlistConfig = mlCfg
	}

	// Timeouts and background-worker intervals.
	if c.BootstrapTimeout == 0 {
		c.BootstrapTimeout = DefaultBootstrapTimeout
	}
	if c.JoinRetryInterval == 0 {
		c.JoinRetryInterval = DefaultJoinRetryInterval
	}
	if c.MaxJoinAttempts == 0 {
		c.MaxJoinAttempts = DefaultMaxJoinAttempts
	}
	if c.LeaveTimeout == 0 {
		c.LeaveTimeout = DefaultLeaveTimeout
	}
	if c.RoutingTablePushInterval == 0 {
		c.RoutingTablePushInterval = DefaultRoutingTablePushInterval
	}
	if c.TriggerBalancerInterval == 0 {
		c.TriggerBalancerInterval = DefaultTriggerBalancerInterval
	}
	if c.KeepAlivePeriod == 0 {
		c.KeepAlivePeriod = DefaultKeepAlivePeriod
	}

	// Nested configurations must exist before they can be sanitized below.
	if c.Client == nil {
		c.Client = NewClient()
	}
	if c.DMaps == nil {
		c.DMaps = &DMaps{}
	}
	if c.Authentication == nil {
		c.Authentication = &Authentication{}
	}

	if err := c.Authentication.Sanitize(); err != nil {
		return fmt.Errorf("failed to sanitize authentication configuration: %w", err)
	}
	if err := c.Client.Sanitize(); err != nil {
		return fmt.Errorf("failed to sanitize TCP client configuration: %w", err)
	}
	if err := c.DMaps.Sanitize(); err != nil {
		return fmt.Errorf("failed to sanitize DMap configuration: %w", err)
	}

	return nil
}

// New returns a Config with sane defaults. If you change a configuration parameter,
// please run Sanitize and Validate functions respectively.
//
// New takes an env parameter used by memberlist: local, lan and wan.
//
// local:
//
// DefaultLocalConfig works like DefaultConfig, however it returns a configuration
// that is optimized for a local loopback environments. The default configuration
// is still very conservative and errs on the side of caution.
//
// lan:
//
// DefaultLANConfig returns a sane set of configurations for Memberlist. It uses
// the hostname as the node name, and otherwise sets very conservative values
// that are sane for most LAN environments. The default configuration errs on
// the side of caution, choosing values that are optimized for higher convergence
// at the cost of higher bandwidth usage. Regardless, these values are a good
// starting point when getting started with memberlist.
//
// wan:
//
// DefaultWANConfig works like DefaultConfig, however it returns a configuration
// that is optimized for most WAN environments. The default configuration is still
// very conservative and errs on the side of caution.
func New(env string) *Config {
	c := &Config{
		BindAddr:          "0.0.0.0",
		BindPort:          DefaultPort,
		ReadRepair:        false,
		ReplicaCount:      1,
		WriteQ
Download .txt
gitextract_bkk5kycd/

├── .github/
│   ├── FUNDING.yml
│   └── workflows/
│       ├── ci.yml
│       ├── codeql-analysis.yml
│       └── golangci-lint.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── auth.go
├── auth_test.go
├── client.go
├── cluster.go
├── cluster_client.go
├── cluster_client_test.go
├── cluster_iterator.go
├── cluster_iterator_test.go
├── cluster_test.go
├── cmd/
│   └── olric-server/
│       ├── main.go
│       ├── olric-server-local.yaml
│       └── server/
│           └── server.go
├── config/
│   ├── authentication.go
│   ├── client.go
│   ├── config.go
│   ├── config_test.go
│   ├── dmap.go
│   ├── dmap_test.go
│   ├── dmaps.go
│   ├── engine.go
│   ├── engine_test.go
│   ├── internal/
│   │   └── loader/
│   │       └── loader.go
│   ├── load.go
│   ├── memberlist.go
│   ├── network.go
│   └── network_test.go
├── docker/
│   ├── README.md
│   ├── docker-compose.yml
│   ├── nginx.conf
│   └── olric-server-consul.yaml
├── embedded_client.go
├── embedded_client_test.go
├── embedded_iterator.go
├── embedded_iterator_test.go
├── events/
│   ├── cluster_events.go
│   └── cluster_events_test.go
├── get_response.go
├── get_response_test.go
├── go.mod
├── go.sum
├── hasher/
│   └── hasher.go
├── integration_test.go
├── internal/
│   ├── bufpool/
│   │   ├── bufpool.go
│   │   └── bufpool_test.go
│   ├── checkpoint/
│   │   ├── checkpoint.go
│   │   └── checkpoint_test.go
│   ├── cluster/
│   │   ├── balancer/
│   │   │   ├── balancer.go
│   │   │   └── balancer_test.go
│   │   ├── partitions/
│   │   │   ├── fragment.go
│   │   │   ├── hkey.go
│   │   │   ├── hkey_test.go
│   │   │   ├── partition.go
│   │   │   ├── partition_test.go
│   │   │   ├── partitions.go
│   │   │   └── partitions_test.go
│   │   └── routingtable/
│   │       ├── callback.go
│   │       ├── callback_test.go
│   │       ├── discovery.go
│   │       ├── discovery_test.go
│   │       ├── distribute.go
│   │       ├── distribute_test.go
│   │       ├── events.go
│   │       ├── events_test.go
│   │       ├── handlers.go
│   │       ├── left_over_data.go
│   │       ├── left_over_data_test.go
│   │       ├── members.go
│   │       ├── members_test.go
│   │       ├── operations.go
│   │       ├── routingtable.go
│   │       ├── routingtable_test.go
│   │       └── update.go
│   ├── discovery/
│   │   ├── delegate.go
│   │   ├── discovery.go
│   │   ├── discovery_test.go
│   │   ├── events.go
│   │   ├── member.go
│   │   └── member_test.go
│   ├── dmap/
│   │   ├── atomic.go
│   │   ├── atomic_handlers.go
│   │   ├── atomic_test.go
│   │   ├── balance.go
│   │   ├── balance_test.go
│   │   ├── compaction.go
│   │   ├── compaction_test.go
│   │   ├── config.go
│   │   ├── config_test.go
│   │   ├── delete.go
│   │   ├── delete_handlers.go
│   │   ├── delete_test.go
│   │   ├── destroy.go
│   │   ├── destroy_handlers.go
│   │   ├── destroy_test.go
│   │   ├── dmap.go
│   │   ├── dmap_test.go
│   │   ├── env.go
│   │   ├── eviction.go
│   │   ├── eviction_test.go
│   │   ├── expire.go
│   │   ├── expire_handlers.go
│   │   ├── expire_test.go
│   │   ├── fragment.go
│   │   ├── fragment_test.go
│   │   ├── get.go
│   │   ├── get_handlers.go
│   │   ├── get_test.go
│   │   ├── handlers.go
│   │   ├── janitor.go
│   │   ├── lock.go
│   │   ├── lock_handlers.go
│   │   ├── lock_test.go
│   │   ├── put.go
│   │   ├── put_handlers.go
│   │   ├── put_test.go
│   │   ├── scan_handlers.go
│   │   ├── scan_test.go
│   │   ├── service.go
│   │   ├── service_test.go
│   │   └── stats_test.go
│   ├── environment/
│   │   ├── environment.go
│   │   └── environment_test.go
│   ├── locker/
│   │   ├── locker.go
│   │   └── locker_test.go
│   ├── protocol/
│   │   ├── cluster.go
│   │   ├── cluster_test.go
│   │   ├── commands.go
│   │   ├── dmap.go
│   │   ├── dmap_test.go
│   │   ├── errors.go
│   │   ├── errors_test.go
│   │   ├── pubsub.go
│   │   ├── pubsub_test.go
│   │   ├── system.go
│   │   └── system_test.go
│   ├── pubsub/
│   │   ├── handlers.go
│   │   ├── handlers_test.go
│   │   ├── pubsub.go
│   │   ├── pubsub_test.go
│   │   └── service.go
│   ├── ramblock/
│   │   ├── compaction.go
│   │   ├── compaction_test.go
│   │   ├── entry/
│   │   │   ├── entry.go
│   │   │   └── entry_test.go
│   │   ├── ramblock.go
│   │   ├── ramblock_test.go
│   │   ├── table/
│   │   │   ├── pack.go
│   │   │   ├── pack_test.go
│   │   │   ├── table.go
│   │   │   └── table_test.go
│   │   └── transport.go
│   ├── resp/
│   │   ├── encoder.go
│   │   ├── encoder_test.go
│   │   └── scan.go
│   ├── roundrobin/
│   │   ├── round_robin.go
│   │   └── round_robin_test.go
│   ├── server/
│   │   ├── client.go
│   │   ├── client_test.go
│   │   ├── handler.go
│   │   ├── handler_test.go
│   │   ├── mux.go
│   │   ├── mux_test.go
│   │   ├── server.go
│   │   └── server_test.go
│   ├── service/
│   │   └── service.go
│   ├── stats/
│   │   ├── stats.go
│   │   └── stats_test.go
│   ├── testcluster/
│   │   └── testcluster.go
│   ├── testutil/
│   │   ├── mockfragment/
│   │   │   └── mockfragment.go
│   │   └── testutil.go
│   └── util/
│       ├── safe.go
│       ├── strconv.go
│       └── unsafe.go
├── olric-server-docker.yaml
├── olric.go
├── olric_test.go
├── ping.go
├── ping_test.go
├── pipeline.go
├── pipeline_test.go
├── pkg/
│   ├── flog/
│   │   └── flog.go
│   ├── neterrors/
│   │   └── errors.go
│   ├── service_discovery/
│   │   └── service_discovery.go
│   └── storage/
│       ├── config.go
│       ├── config_test.go
│       ├── engine.go
│       ├── entry.go
│       └── stats.go
├── pubsub.go
├── pubsub_test.go
├── stats/
│   ├── stats.go
│   └── stats_test.go
├── stats.go
└── stats_test.go
Download .txt
SYMBOL INDEX (1519 symbols across 184 files)

FILE: auth.go
  method authCommandHandler (line 26) | func (db *Olric) authCommandHandler(conn redcon.Conn, cmd redcon.Command) {

FILE: auth_test.go
  function TestAuthCommandHandler_WithPassword (line 26) | func TestAuthCommandHandler_WithPassword(t *testing.T) {
  function TestAuthCommandHandler_Auth_Disabled (line 59) | func TestAuthCommandHandler_Auth_Disabled(t *testing.T) {

FILE: client.go
  constant DefaultScanCount (line 26) | DefaultScanCount = 10
  type Member (line 29) | type Member struct
  type Iterator (line 45) | type Iterator interface
  type LockContext (line 58) | type LockContext interface
  type PutOption (line 69) | type PutOption
  function EX (line 72) | func EX(ex time.Duration) PutOption {
  function PX (line 80) | func PX(px time.Duration) PutOption {
  function EXAT (line 88) | func EXAT(exat time.Duration) PutOption {
  function PXAT (line 96) | func PXAT(pxat time.Duration) PutOption {
  function NX (line 104) | func NX() PutOption {
  function XX (line 111) | func XX() PutOption {
  type dmapConfig (line 117) | type dmapConfig struct
  type DMapOption (line 122) | type DMapOption
  function StorageEntryImplementation (line 125) | func StorageEntryImplementation(e func() storage.Entry) DMapOption {
  type ScanOption (line 132) | type ScanOption
  function Count (line 138) | func Count(c int) ScanOption {
  function Match (line 146) | func Match(s string) ScanOption {
  type DMap (line 154) | type DMap interface
  type PipelineOption (line 246) | type PipelineOption
  function PipelineConcurrency (line 249) | func PipelineConcurrency(concurrency int) PipelineOption {
  type statsConfig (line 255) | type statsConfig struct
  type StatsOption (line 260) | type StatsOption
  function CollectRuntime (line 263) | func CollectRuntime() StatsOption {
  type pubsubConfig (line 269) | type pubsubConfig struct
  function ToAddress (line 274) | func ToAddress(addr string) PubSubOption {
  type PubSubOption (line 281) | type PubSubOption
  type Client (line 284) | type Client interface

FILE: cluster.go
  type Route (line 26) | type Route struct
  type RoutingTable (line 31) | type RoutingTable
  function mapToRoutingTable (line 33) | func mapToRoutingTable(slice []interface{}) (RoutingTable, error) {
  method clusterRoutingTableCommandHandler (line 81) | func (db *Olric) clusterRoutingTableCommandHandler(conn redcon.Conn, cmd...
  method fillRoutingTable (line 127) | func (db *Olric) fillRoutingTable() RoutingTable {
  method routingTable (line 144) | func (db *Olric) routingTable(ctx context.Context) (RoutingTable, error) {
  method clusterMembersCommandHandler (line 163) | func (db *Olric) clusterMembersCommandHandler(conn redcon.Conn, cmd redc...

FILE: cluster_client.go
  constant DefaultRoutingTableFetchInterval (line 49) | DefaultRoutingTableFetchInterval = time.Minute
  type ClusterLockContext (line 51) | type ClusterLockContext struct
    method Unlock (line 404) | func (c *ClusterLockContext) Unlock(ctx context.Context) error {
    method Lease (line 418) | func (c *ClusterLockContext) Lease(ctx context.Context, duration time....
  type ClusterDMap (line 58) | type ClusterDMap struct
    method Name (line 67) | func (dm *ClusterDMap) Name() string {
    method writePutCommand (line 87) | func (dm *ClusterDMap) writePutCommand(c *dmap.PutConfig, key string, ...
    method Put (line 139) | func (dm *ClusterDMap) Put(ctx context.Context, key string, value inte...
    method makeGetResponse (line 168) | func (dm *ClusterDMap) makeGetResponse(cmd *redis.StringCmd) (*GetResp...
    method Get (line 184) | func (dm *ClusterDMap) Get(ctx context.Context, key string) (*GetRespo...
    method Delete (line 199) | func (dm *ClusterDMap) Delete(ctx context.Context, keys ...string) (in...
    method Incr (line 220) | func (dm *ClusterDMap) Incr(ctx context.Context, key string, delta int...
    method Decr (line 240) | func (dm *ClusterDMap) Decr(ctx context.Context, key string, delta int...
    method GetPut (line 260) | func (dm *ClusterDMap) GetPut(ctx context.Context, key string, value i...
    method IncrByFloat (line 300) | func (dm *ClusterDMap) IncrByFloat(ctx context.Context, key string, de...
    method Expire (line 320) | func (dm *ClusterDMap) Expire(ctx context.Context, key string, timeout...
    method Lock (line 342) | func (dm *ClusterDMap) Lock(ctx context.Context, key string, deadline ...
    method LockWithTimeout (line 374) | func (dm *ClusterDMap) LockWithTimeout(ctx context.Context, key string...
    method Close (line 399) | func (dm *ClusterDMap) Close(_ context.Context) error {
    method Scan (line 437) | func (dm *ClusterDMap) Scan(ctx context.Context, options ...ScanOption...
    method Destroy (line 476) | func (dm *ClusterDMap) Destroy(ctx context.Context) error {
  function processProtocolError (line 72) | func processProtocolError(err error) error {
  type ClusterClient (line 492) | type ClusterClient struct
    method clientByPartID (line 110) | func (cl *ClusterClient) clientByPartID(partID uint64) (*redis.Client,...
    method smartPick (line 130) | func (cl *ClusterClient) smartPick(dmap, key string) (*redis.Client, e...
    method Ping (line 506) | func (cl *ClusterClient) Ping(ctx context.Context, addr, message strin...
    method RoutingTable (line 527) | func (cl *ClusterClient) RoutingTable(ctx context.Context) (RoutingTab...
    method Stats (line 552) | func (cl *ClusterClient) Stats(ctx context.Context, address string, op...
    method Members (line 587) | func (cl *ClusterClient) Members(ctx context.Context) ([]Member, error) {
    method RefreshMetadata (line 628) | func (cl *ClusterClient) RefreshMetadata(ctx context.Context) error {
    method Close (line 663) | func (cl *ClusterClient) Close(ctx context.Context) error {
    method NewPubSub (line 681) | func (cl *ClusterClient) NewPubSub(options ...PubSubOption) (*PubSub, ...
    method NewDMap (line 686) | func (cl *ClusterClient) NewDMap(name string, options ...DMapOption) (...
    method fetchRoutingTable (line 757) | func (cl *ClusterClient) fetchRoutingTable() error {
    method fetchRoutingTablePeriodically (line 777) | func (cl *ClusterClient) fetchRoutingTablePeriodically() {
  type ClusterClientOption (line 707) | type ClusterClientOption
  type clusterClientConfig (line 710) | type clusterClientConfig struct
  function WithHasher (line 719) | func WithHasher(h hasher.Hasher) ClusterClientOption {
  function WithLogger (line 726) | func WithLogger(l *log.Logger) ClusterClientOption {
  function WithConfig (line 733) | func WithConfig(c *config.Client) ClusterClientOption {
  function WithPassword (line 740) | func WithPassword(password string) ClusterClientOption {
  function WithRoutingTableFetchInterval (line 749) | func WithRoutingTableFetchInterval(interval time.Duration) ClusterClient...
  function NewClusterClient (line 797) | func NewClusterClient(addresses []string, options ...ClusterClientOption...

FILE: cluster_client_test.go
  function TestClusterClient_Ping (line 32) | func TestClusterClient_Ping(t *testing.T) {
  function TestClusterClient_Ping_WithMessage (line 49) | func TestClusterClient_Ping_WithMessage(t *testing.T) {
  function TestClusterClient_RoutingTable (line 67) | func TestClusterClient_RoutingTable(t *testing.T) {
  function TestClusterClient_RoutingTable_Cluster (line 84) | func TestClusterClient_RoutingTable_Cluster(t *testing.T) {
  function TestClusterClient_Put (line 104) | func TestClusterClient_Put(t *testing.T) {
  function TestClusterClient_Get (line 122) | func TestClusterClient_Get(t *testing.T) {
  function TestClusterClient_Delete (line 148) | func TestClusterClient_Delete(t *testing.T) {
  function TestClusterClient_Delete_Many_Keys (line 173) | func TestClusterClient_Delete_Many_Keys(t *testing.T) {
  function TestClusterClient_Destroy (line 200) | func TestClusterClient_Destroy(t *testing.T) {
  function TestClusterClient_Incr (line 224) | func TestClusterClient_Incr(t *testing.T) {
  function TestClusterClient_IncrByFloat (line 253) | func TestClusterClient_IncrByFloat(t *testing.T) {
  function TestClusterClient_Decr (line 282) | func TestClusterClient_Decr(t *testing.T) {
  function TestClusterClient_GetPut (line 314) | func TestClusterClient_GetPut(t *testing.T) {
  function TestClusterClient_Expire (line 340) | func TestClusterClient_Expire(t *testing.T) {
  function TestClusterClient_Lock_Unlock (line 366) | func TestClusterClient_Lock_Unlock(t *testing.T) {
  function TestClusterClient_Lock_Lease (line 387) | func TestClusterClient_Lock_Lease(t *testing.T) {
  function TestClusterClient_Lock_ErrLockNotAcquired (line 413) | func TestClusterClient_Lock_ErrLockNotAcquired(t *testing.T) {
  function TestClusterClient_LockWithTimeout (line 434) | func TestClusterClient_LockWithTimeout(t *testing.T) {
  function TestClusterClient_LockWithTimeout_ErrNoSuchLock (line 455) | func TestClusterClient_LockWithTimeout_ErrNoSuchLock(t *testing.T) {
  function TestClusterClient_LockWithTimeout_Then_Lease (line 478) | func TestClusterClient_LockWithTimeout_Then_Lease(t *testing.T) {
  function TestClusterClient_LockWithTimeout_ErrLockNotAcquired (line 505) | func TestClusterClient_LockWithTimeout_ErrLockNotAcquired(t *testing.T) {
  function TestClusterClient_Put_Ex (line 526) | func TestClusterClient_Put_Ex(t *testing.T) {
  function TestClusterClient_Put_PX (line 549) | func TestClusterClient_Put_PX(t *testing.T) {
  function TestClusterClient_Put_EXAT (line 572) | func TestClusterClient_Put_EXAT(t *testing.T) {
  function TestClusterClient_Put_PXAT (line 595) | func TestClusterClient_Put_PXAT(t *testing.T) {
  function TestClusterClient_Put_NX (line 618) | func TestClusterClient_Put_NX(t *testing.T) {
  function TestClusterClient_Put_XX (line 646) | func TestClusterClient_Put_XX(t *testing.T) {
  function TestClusterClient_Stats (line 664) | func TestClusterClient_Stats(t *testing.T) {
  function TestClusterClient_Stats_Cluster (line 682) | func TestClusterClient_Stats_Cluster(t *testing.T) {
  function TestClusterClient_Stats_CollectRuntime (line 704) | func TestClusterClient_Stats_CollectRuntime(t *testing.T) {
  function TestClusterClient_Set_Options (line 722) | func TestClusterClient_Set_Options(t *testing.T) {
  function TestClusterClient_Members (line 740) | func TestClusterClient_Members(t *testing.T) {
  function TestClusterClient_smartPick (line 769) | func TestClusterClient_smartPick(t *testing.T) {

FILE: cluster_iterator.go
  type currentCursor (line 27) | type currentCursor struct
  type ClusterIterator (line 33) | type ClusterIterator struct
    method loadRoute (line 55) | func (i *ClusterIterator) loadRoute() {
    method updateCursor (line 66) | func (i *ClusterIterator) updateCursor(owner string, cursor uint64) {
    method loadCursor (line 90) | func (i *ClusterIterator) loadCursor(owner string) uint64 {
    method updateIterator (line 104) | func (i *ClusterIterator) updateIterator(keys []string, cursor uint64,...
    method getOwners (line 114) | func (i *ClusterIterator) getOwners() []string {
    method removeScannedOwner (line 129) | func (i *ClusterIterator) removeScannedOwner(idx int) {
    method scanOnOwners (line 141) | func (i *ClusterIterator) scanOnOwners() error {
    method resetPage (line 179) | func (i *ClusterIterator) resetPage() {
    method fetchData (line 186) | func (i *ClusterIterator) fetchData() error {
    method reset (line 196) | func (i *ClusterIterator) reset() {
    method next (line 202) | func (i *ClusterIterator) next() bool {
    method Next (line 242) | func (i *ClusterIterator) Next() bool {
    method Key (line 256) | func (i *ClusterIterator) Key() string {
    method fetchRoutingTablePeriodically (line 267) | func (i *ClusterIterator) fetchRoutingTablePeriodically() {
    method fetchRoutingTable (line 282) | func (i *ClusterIterator) fetchRoutingTable() error {
    method Close (line 298) | func (i *ClusterIterator) Close() {

FILE: cluster_iterator_test.go
  function TestClusterClient_ScanMatch (line 26) | func TestClusterClient_ScanMatch(t *testing.T) {
  function TestClusterClient_Scan (line 64) | func TestClusterClient_Scan(t *testing.T) {

FILE: cluster_test.go
  function TestOlric_ClusterRoutingTable_clusterRoutingTableCommandHandler (line 25) | func TestOlric_ClusterRoutingTable_clusterRoutingTableCommandHandler(t *...
  function TestOlric_RoutingTable_Standalone (line 46) | func TestOlric_RoutingTable_Standalone(t *testing.T) {

FILE: cmd/olric-server/main.go
  function usage (line 33) | func usage() {
  type arguments (line 53) | type arguments struct
  constant DefaultConfigFile (line 61) | DefaultConfigFile = "olric-server-local.yaml"
  constant EnvConfigFile (line 64) | EnvConfigFile = "OLRIC_SERVER_CONFIG"
  function main (line 67) | func main() {

FILE: cmd/olric-server/server/server.go
  type OlricServer (line 33) | type OlricServer struct
    method waitForInterrupt (line 54) | func (s *OlricServer) waitForInterrupt() {
    method Start (line 89) | func (s *OlricServer) Start() error {
    method Shutdown (line 102) | func (s *OlricServer) Shutdown(ctx context.Context) error {
  function New (line 41) | func New(c *config.Config) (*OlricServer, error) {

FILE: config/authentication.go
  type Authentication (line 19) | type Authentication struct
    method Sanitize (line 24) | func (a *Authentication) Sanitize() error {
    method Validate (line 30) | func (a *Authentication) Validate() error {
    method Enabled (line 36) | func (a *Authentication) Enabled() bool {

FILE: config/client.go
  constant DefaultDialTimeout (line 29) | DefaultDialTimeout     = 5 * time.Second
  constant DefaultKeepalive (line 30) | DefaultKeepalive       = 5 * time.Minute
  constant DefaultReadTimeout (line 31) | DefaultReadTimeout     = 3 * time.Second
  constant DefaultIdleTimeout (line 32) | DefaultIdleTimeout     = 5 * time.Minute
  constant DefaultMinRetryBackoff (line 33) | DefaultMinRetryBackoff = 8 * time.Millisecond
  constant DefaultMaxRetryBackoff (line 34) | DefaultMaxRetryBackoff = 512 * time.Millisecond
  constant DefaultMaxRetries (line 35) | DefaultMaxRetries      = 3
  type Client (line 39) | type Client struct
    method Sanitize (line 122) | func (c *Client) Sanitize() error {
    method Validate (line 186) | func (c *Client) Validate() error {
    method RedisOptions (line 193) | func (c *Client) RedisOptions() *redis.Options {
  function NewClient (line 110) | func NewClient() *Client {

FILE: config/config.go
  type IConfig (line 33) | type IConfig interface
  constant SyncReplicationMode (line 45) | SyncReplicationMode = 0
  constant AsyncReplicationMode (line 49) | AsyncReplicationMode = 1
  constant LogLevelDebug (line 53) | LogLevelDebug = "DEBUG"
  constant LogLevelWarn (line 54) | LogLevelWarn  = "WARN"
  constant LogLevelError (line 55) | LogLevelError = "ERROR"
  constant LogLevelInfo (line 56) | LogLevelInfo  = "INFO"
  constant DefaultPort (line 61) | DefaultPort = 3320
  constant DefaultDiscoveryPort (line 64) | DefaultDiscoveryPort = 3322
  constant DefaultPartitionCount (line 67) | DefaultPartitionCount = 271
  constant DefaultLoadFactor (line 70) | DefaultLoadFactor = 1.25
  constant DefaultLogLevel (line 74) | DefaultLogLevel = LogLevelDebug
  constant DefaultLogVerbosity (line 102) | DefaultLogVerbosity = 3
  constant MinimumReplicaCount (line 106) | MinimumReplicaCount = 1
  constant DefaultBootstrapTimeout (line 110) | DefaultBootstrapTimeout = 10 * time.Second
  constant DefaultJoinRetryInterval (line 113) | DefaultJoinRetryInterval = time.Second
  constant DefaultMaxJoinAttempts (line 117) | DefaultMaxJoinAttempts = 10
  constant MinimumMemberCountQuorum (line 121) | MinimumMemberCountQuorum = 1
  constant DefaultLRUSamples (line 125) | DefaultLRUSamples int = 5
  constant LRUEviction (line 129) | LRUEviction EvictionPolicy = "LRU"
  constant DefaultStorageEngine (line 133) | DefaultStorageEngine = "ramblock"
  constant DefaultRoutingTablePushInterval (line 136) | DefaultRoutingTablePushInterval = time.Minute
  constant DefaultTriggerBalancerInterval (line 139) | DefaultTriggerBalancerInterval = 15 * time.Second
  constant DefaultCheckEmptyFragmentsInterval (line 143) | DefaultCheckEmptyFragmentsInterval = time.Minute
  constant DefaultTriggerCompactionInterval (line 148) | DefaultTriggerCompactionInterval = 10 * time.Minute
  constant DefaultLeaveTimeout (line 151) | DefaultLeaveTimeout = 5 * time.Second
  constant DefaultReadQuorum (line 153) | DefaultReadQuorum        = 1
  constant DefaultWriteQuorum (line 154) | DefaultWriteQuorum       = 1
  constant DefaultMemberCountQuorum (line 155) | DefaultMemberCountQuorum = 1
  constant DefaultKeepAlivePeriod (line 163) | DefaultKeepAlivePeriod = 300 * time.Second
  type Config (line 167) | type Config struct
    method Validate (line 326) | func (c *Config) Validate() error {
    method Sanitize (line 392) | func (c *Config) Sanitize() error {
  function New (line 533) | func New(env string) *Config {

FILE: config/config_test.go
  function createTmpFile (line 117) | func createTmpFile(t *testing.T, pattern string) *os.File {
  function TestConfig (line 135) | func TestConfig(t *testing.T) {
  function TestConfig_Initialize (line 240) | func TestConfig_Initialize(t *testing.T) {

FILE: config/dmap.go
  type EvictionPolicy (line 23) | type EvictionPolicy
  type DMap (line 31) | type DMap struct
    method Sanitize (line 70) | func (dm *DMap) Sanitize() error {
    method Validate (line 96) | func (dm *DMap) Validate() error {

FILE: config/dmap_test.go
  function TestConfig_DMap (line 23) | func TestConfig_DMap(t *testing.T) {

FILE: config/dmaps.go
  type DMaps (line 26) | type DMaps struct
    method Sanitize (line 82) | func (dm *DMaps) Sanitize() error {
    method Validate (line 132) | func (dm *DMaps) Validate() error {

FILE: config/engine.go
  type Engine (line 27) | type Engine struct
    method Validate (line 48) | func (s *Engine) Validate() error {
    method Sanitize (line 56) | func (s *Engine) Sanitize() error {
  function NewEngine (line 41) | func NewEngine() *Engine {

FILE: config/engine_test.go
  function TestEngine_KVStore_Backward_Compat (line 23) | func TestEngine_KVStore_Backward_Compat(t *testing.T) {
  function TestEngine_Dont_Overwrite_TableSize (line 33) | func TestEngine_Dont_Overwrite_TableSize(t *testing.T) {

FILE: config/internal/loader/loader.go
  type server (line 19) | type server struct
  type authentication (line 41) | type authentication struct
  type client (line 45) | type client struct
  type logging (line 61) | type logging struct
  type memberlist (line 67) | type memberlist struct
  type engine (line 98) | type engine struct
  type dmap (line 103) | type dmap struct
  type dmaps (line 113) | type dmaps struct
  type serviceDiscovery (line 127) | type serviceDiscovery
  type Loader (line 130) | type Loader struct
  function New (line 141) | func New(data []byte) (*Loader, error) {

FILE: config/load.go
  function mapYamlToConfig (line 33) | func mapYamlToConfig(rawDst, rawSrc interface{}) error {
  function loadDMapConfig (line 62) | func loadDMapConfig(c *loader.Loader) (*DMaps, error) {
  function loadMemberlistConfig (line 145) | func loadMemberlistConfig(c *loader.Loader, mc *memberlist.Config) (*mem...
  function Load (line 259) | func Load(filename string) (*Config, error) {

FILE: config/memberlist.go
  method validateMemberlistConfig (line 26) | func (c *Config) validateMemberlistConfig() error {
  function NewMemberlistConfig (line 60) | func NewMemberlistConfig(env string) (*memberlist.Config, error) {

FILE: config/network.go
  function addrParts (line 31) | func addrParts(address string) (string, int, error) {
  function getBindIPFromNetworkInterface (line 41) | func getBindIPFromNetworkInterface(addrs []net.Addr) (string, error) {
  function getBindIP (line 68) | func getBindIP(ifname, address string) (string, error) {
  method SetupNetworkConfig (line 134) | func (c *Config) SetupNetworkConfig() (err error) {

FILE: config/network_test.go
  function TestConfig_SetupNetworkConfig (line 23) | func TestConfig_SetupNetworkConfig(t *testing.T) {
  function TestConfig_SetupNetworkConfig_Memberlist_AdvertiseAddr (line 31) | func TestConfig_SetupNetworkConfig_Memberlist_AdvertiseAddr(t *testing.T) {

FILE: embedded_client.go
  type EmbeddedLockContext (line 32) | type EmbeddedLockContext struct
    method Unlock (line 39) | func (l *EmbeddedLockContext) Unlock(ctx context.Context) error {
    method Lease (line 45) | func (l *EmbeddedLockContext) Lease(ctx context.Context, duration time...
  type EmbeddedClient (line 51) | type EmbeddedClient struct
    method RefreshMetadata (line 122) | func (e *EmbeddedClient) RefreshMetadata(_ context.Context) error {
    method NewDMap (line 295) | func (e *EmbeddedClient) NewDMap(name string, options ...DMapOption) (...
    method Stats (line 316) | func (e *EmbeddedClient) Stats(ctx context.Context, address string, op...
    method Close (line 357) | func (e *EmbeddedClient) Close(_ context.Context) error {
    method Ping (line 364) | func (e *EmbeddedClient) Ping(ctx context.Context, addr, message strin...
    method RoutingTable (line 373) | func (e *EmbeddedClient) RoutingTable(ctx context.Context) (RoutingTab...
    method Members (line 378) | func (e *EmbeddedClient) Members(_ context.Context) ([]Member, error) {
    method NewPubSub (line 397) | func (e *EmbeddedClient) NewPubSub(options ...PubSubOption) (*PubSub, ...
  type EmbeddedDMap (line 56) | type EmbeddedDMap struct
    method setOrGetClusterClient (line 66) | func (dm *EmbeddedDMap) setOrGetClusterClient() (Client, error) {
    method Pipeline (line 106) | func (dm *EmbeddedDMap) Pipeline(opts ...PipelineOption) (*DMapPipelin...
    method Scan (line 133) | func (dm *EmbeddedDMap) Scan(ctx context.Context, options ...ScanOptio...
    method Lock (line 166) | func (dm *EmbeddedDMap) Lock(ctx context.Context, key string, deadline...
    method LockWithTimeout (line 188) | func (dm *EmbeddedDMap) LockWithTimeout(ctx context.Context, key strin...
    method Destroy (line 203) | func (dm *EmbeddedDMap) Destroy(ctx context.Context) error {
    method Expire (line 209) | func (dm *EmbeddedDMap) Expire(ctx context.Context, key string, timeou...
    method Name (line 214) | func (dm *EmbeddedDMap) Name() string {
    method GetPut (line 220) | func (dm *EmbeddedDMap) GetPut(ctx context.Context, key string, value ...
    method Decr (line 232) | func (dm *EmbeddedDMap) Decr(ctx context.Context, key string, delta in...
    method Incr (line 238) | func (dm *EmbeddedDMap) Incr(ctx context.Context, key string, delta in...
    method IncrByFloat (line 243) | func (dm *EmbeddedDMap) IncrByFloat(ctx context.Context, key string, d...
    method Delete (line 250) | func (dm *EmbeddedDMap) Delete(ctx context.Context, keys ...string) (i...
    method Get (line 257) | func (dm *EmbeddedDMap) Get(ctx context.Context, key string) (*GetResp...
    method Put (line 271) | func (dm *EmbeddedDMap) Put(ctx context.Context, key string, value int...
    method Close (line 284) | func (dm *EmbeddedDMap) Close(ctx context.Context) error {
  method NewEmbeddedClient (line 402) | func (db *Olric) NewEmbeddedClient() *EmbeddedClient {

FILE: embedded_client_test.go
  function TestEmbeddedClient_NewDMap (line 30) | func TestEmbeddedClient_NewDMap(t *testing.T) {
  function TestEmbeddedClient_DMap_Put (line 39) | func TestEmbeddedClient_DMap_Put(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_EX (line 51) | func TestEmbeddedClient_DMap_Put_EX(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_PX (line 69) | func TestEmbeddedClient_DMap_Put_PX(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_EXAT (line 87) | func TestEmbeddedClient_DMap_Put_EXAT(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_PXAT (line 105) | func TestEmbeddedClient_DMap_Put_PXAT(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_NX (line 123) | func TestEmbeddedClient_DMap_Put_NX(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_XX (line 141) | func TestEmbeddedClient_DMap_Put_XX(t *testing.T) {
  function TestEmbeddedClient_DMap_Get (line 154) | func TestEmbeddedClient_DMap_Get(t *testing.T) {
  function TestEmbeddedClient_DMap_Delete (line 173) | func TestEmbeddedClient_DMap_Delete(t *testing.T) {
  function TestEmbeddedClient_DMap_Delete_Many_Keys (line 192) | func TestEmbeddedClient_DMap_Delete_Many_Keys(t *testing.T) {
  function TestEmbeddedClient_DMap_Atomic_Incr (line 213) | func TestEmbeddedClient_DMap_Atomic_Incr(t *testing.T) {
  function TestEmbeddedClient_DMap_Atomic_Decr (line 237) | func TestEmbeddedClient_DMap_Atomic_Decr(t *testing.T) {
  function TestEmbeddedClient_DMap_GetPut (line 264) | func TestEmbeddedClient_DMap_GetPut(t *testing.T) {
  function TestEmbeddedClient_DMap_Atomic_IncrByFloat (line 286) | func TestEmbeddedClient_DMap_Atomic_IncrByFloat(t *testing.T) {
  function TestEmbeddedClient_DMap_Expire (line 310) | func TestEmbeddedClient_DMap_Expire(t *testing.T) {
  function TestEmbeddedClient_DMap_Destroy (line 331) | func TestEmbeddedClient_DMap_Destroy(t *testing.T) {
  function TestEmbeddedClient_DMap_Lock (line 360) | func TestEmbeddedClient_DMap_Lock(t *testing.T) {
  function TestEmbeddedClient_DMap_Lock_ErrLockNotAcquired (line 378) | func TestEmbeddedClient_DMap_Lock_ErrLockNotAcquired(t *testing.T) {
  function TestEmbeddedClient_DMap_Lock_ErrNoSuchLock (line 396) | func TestEmbeddedClient_DMap_Lock_ErrNoSuchLock(t *testing.T) {
  function TestEmbeddedClient_DMap_LockWithTimeout (line 417) | func TestEmbeddedClient_DMap_LockWithTimeout(t *testing.T) {
  function TestEmbeddedClient_DMap_LockWithTimeout_Timeout (line 435) | func TestEmbeddedClient_DMap_LockWithTimeout_Timeout(t *testing.T) {
  function TestEmbeddedClient_DMap_LockWithTimeout_ErrLockNotAcquired (line 455) | func TestEmbeddedClient_DMap_LockWithTimeout_ErrLockNotAcquired(t *testi...
  function TestEmbeddedClient_DMap_LockWithTimeout_ErrNoSuchLock (line 473) | func TestEmbeddedClient_DMap_LockWithTimeout_ErrNoSuchLock(t *testing.T) {
  function TestEmbeddedClient_DMap_LockWithTimeout_ErrNoSuchLock_Timeout (line 494) | func TestEmbeddedClient_DMap_LockWithTimeout_ErrNoSuchLock_Timeout(t *te...
  function TestEmbeddedClient_DMap_LockWithTimeout_Then_Lease (line 514) | func TestEmbeddedClient_DMap_LockWithTimeout_Then_Lease(t *testing.T) {
  function TestEmbeddedClient_RoutingTable_Standalone (line 538) | func TestEmbeddedClient_RoutingTable_Standalone(t *testing.T) {
  function TestEmbeddedClient_RoutingTable_Cluster (line 553) | func TestEmbeddedClient_RoutingTable_Cluster(t *testing.T) {
  function TestEmbeddedClient_Member (line 575) | func TestEmbeddedClient_Member(t *testing.T) {
  function TestEmbeddedClient_Ping (line 597) | func TestEmbeddedClient_Ping(t *testing.T) {
  function TestEmbeddedClient_Ping_WithMessage (line 608) | func TestEmbeddedClient_Ping_WithMessage(t *testing.T) {
  function TestEmbeddedClient_DMap_Put_PX_With_NX (line 620) | func TestEmbeddedClient_DMap_Put_PX_With_NX(t *testing.T) {
  function TestEmbeddedClient_Issue263 (line 644) | func TestEmbeddedClient_Issue263(t *testing.T) {

FILE: embedded_iterator.go
  type EmbeddedIterator (line 25) | type EmbeddedIterator struct
    method scanOnOwners (line 33) | func (e *EmbeddedIterator) scanOnOwners() error {
    method Next (line 85) | func (e *EmbeddedIterator) Next() bool {
    method Key (line 90) | func (e *EmbeddedIterator) Key() string {
    method Close (line 95) | func (e *EmbeddedIterator) Close() {

FILE: embedded_iterator_test.go
  function TestEmbeddedClient_ScanMatch (line 26) | func TestEmbeddedClient_ScanMatch(t *testing.T) {
  function TestEmbeddedClient_Scan (line 61) | func TestEmbeddedClient_Scan(t *testing.T) {

FILE: events/cluster_events.go
  constant ClusterEventsChannel (line 29) | ClusterEventsChannel       = "cluster.events"
  constant KindNodeJoinEvent (line 30) | KindNodeJoinEvent          = "node-join-event"
  constant KindNodeLeftEvent (line 31) | KindNodeLeftEvent          = "node-left-event"
  constant KindFragmentMigrationEvent (line 32) | KindFragmentMigrationEvent = "fragment-migration-event"
  constant KindFragmentReceivedEvent (line 33) | KindFragmentReceivedEvent  = "fragment-received-event"
  type Event (line 36) | type Event interface
  function encodeEvent (line 41) | func encodeEvent(data interface{}, fields []string, valueExtractor func(...
  type NodeJoinEvent (line 85) | type NodeJoinEvent struct
    method Encode (line 92) | func (n *NodeJoinEvent) Encode() (string, error) {
  type NodeLeftEvent (line 108) | type NodeLeftEvent struct
    method Encode (line 115) | func (n *NodeLeftEvent) Encode() (string, error) {
  type FragmentMigrationEvent (line 131) | type FragmentMigrationEvent struct
    method Encode (line 143) | func (f *FragmentMigrationEvent) Encode() (string, error) {
  type FragmentReceivedEvent (line 173) | type FragmentReceivedEvent struct
    method Encode (line 184) | func (f *FragmentReceivedEvent) Encode() (string, error) {

FILE: events/cluster_events_test.go
  function TestClusterEvents_NodeJoinEvent (line 23) | func TestClusterEvents_NodeJoinEvent(t *testing.T) {
  function TestClusterEvents_NodeLeftEvent (line 37) | func TestClusterEvents_NodeLeftEvent(t *testing.T) {
  function TestClusterEvents_FragmentMigrationEvent (line 51) | func TestClusterEvents_FragmentMigrationEvent(t *testing.T) {
  function TestClusterEvents_FragmentReceivedEvent (line 69) | func TestClusterEvents_FragmentReceivedEvent(t *testing.T) {

FILE: get_response.go
  type GetResponse (line 27) | type GetResponse struct
    method Scan (line 31) | func (g *GetResponse) Scan(v interface{}) error {
    method Int (line 38) | func (g *GetResponse) Int() (int, error) {
    method String (line 47) | func (g *GetResponse) String() (string, error) {
    method Int8 (line 56) | func (g *GetResponse) Int8() (int8, error) {
    method Int16 (line 65) | func (g *GetResponse) Int16() (int16, error) {
    method Int32 (line 74) | func (g *GetResponse) Int32() (int32, error) {
    method Int64 (line 83) | func (g *GetResponse) Int64() (int64, error) {
    method Uint (line 92) | func (g *GetResponse) Uint() (uint, error) {
    method Uint8 (line 101) | func (g *GetResponse) Uint8() (uint8, error) {
    method Uint16 (line 110) | func (g *GetResponse) Uint16() (uint16, error) {
    method Uint32 (line 119) | func (g *GetResponse) Uint32() (uint32, error) {
    method Uint64 (line 128) | func (g *GetResponse) Uint64() (uint64, error) {
    method Float32 (line 137) | func (g *GetResponse) Float32() (float32, error) {
    method Float64 (line 146) | func (g *GetResponse) Float64() (float64, error) {
    method Bool (line 155) | func (g *GetResponse) Bool() (bool, error) {
    method Time (line 164) | func (g *GetResponse) Time() (time.Time, error) {
    method Duration (line 173) | func (g *GetResponse) Duration() (time.Duration, error) {
    method Byte (line 182) | func (g *GetResponse) Byte() ([]byte, error) {
    method TTL (line 191) | func (g *GetResponse) TTL() int64 {
    method Timestamp (line 195) | func (g *GetResponse) Timestamp() int64 {

FILE: get_response_test.go
  function TestDMap_Get_GetResponse (line 30) | func TestDMap_Get_GetResponse(t *testing.T) {
  type myType (line 373) | type myType struct
    method MarshalBinary (line 377) | func (mt *myType) MarshalBinary() ([]byte, error) {
    method UnmarshalBinary (line 381) | func (mt *myType) UnmarshalBinary(data []byte) error {

FILE: hasher/hasher.go
  function NewDefaultHasher (line 21) | func NewDefaultHasher() Hasher {
  type xxhasher (line 25) | type xxhasher struct
    method Sum64 (line 27) | func (x xxhasher) Sum64(key []byte) uint64 {
  type Hasher (line 35) | type Hasher interface

FILE: integration_test.go
  function TestIntegration_NodesJoinOrLeftDuringQuery (line 29) | func TestIntegration_NodesJoinOrLeftDuringQuery(t *testing.T) {
  function TestIntegration_DMap_Cache_Eviction_LRU_MaxKeys (line 105) | func TestIntegration_DMap_Cache_Eviction_LRU_MaxKeys(t *testing.T) {
  function TestIntegration_DMap_Cache_Eviction_MaxKeys (line 154) | func TestIntegration_DMap_Cache_Eviction_MaxKeys(t *testing.T) {
  function TestIntegration_DMap_Cache_Eviction_MaxIdleDuration (line 210) | func TestIntegration_DMap_Cache_Eviction_MaxIdleDuration(t *testing.T) {
  function TestIntegration_DMap_Cache_Eviction_TTLDuration (line 259) | func TestIntegration_DMap_Cache_Eviction_TTLDuration(t *testing.T) {
  function TestIntegration_DMap_Cache_Eviction_LRU_MaxInuse (line 308) | func TestIntegration_DMap_Cache_Eviction_LRU_MaxInuse(t *testing.T) {
  function TestIntegration_Kill_Nodes_During_Operation (line 358) | func TestIntegration_Kill_Nodes_During_Operation(t *testing.T) {
  function scanIntegrationTestCommon (line 433) | func scanIntegrationTestCommon(t *testing.T, embedded bool, keyFunc func...
  function TestIntegration_Network_Partitioning_Cluster_DM_SCAN (line 516) | func TestIntegration_Network_Partitioning_Cluster_DM_SCAN(t *testing.T) {
  function TestIntegration_Network_Partitioning_Cluster_DM_SCAN_Match (line 526) | func TestIntegration_Network_Partitioning_Cluster_DM_SCAN_Match(t *testi...
  function TestIntegration_Network_Partitioning_Embedded_DM_SCAN (line 541) | func TestIntegration_Network_Partitioning_Embedded_DM_SCAN(t *testing.T) {
  function TestIntegration_Network_Partitioning_Embedded_DM_SCAN_Match (line 551) | func TestIntegration_Network_Partitioning_Embedded_DM_SCAN_Match(t *test...

FILE: internal/bufpool/bufpool.go
  type BufPool (line 23) | type BufPool struct
    method Put (line 39) | func (p *BufPool) Put(b *bytes.Buffer) {
    method Get (line 46) | func (p *BufPool) Get() *bytes.Buffer {
  function New (line 28) | func New() *BufPool {

FILE: internal/bufpool/bufpool_test.go
  function TestBufPool (line 23) | func TestBufPool(t *testing.T) {

FILE: internal/checkpoint/checkpoint.go
  function Add (line 24) | func Add() {
  function Pass (line 28) | func Pass() {
  function AllPassed (line 32) | func AllPassed() bool {

FILE: internal/checkpoint/checkpoint_test.go
  function TestCheckpoint (line 24) | func TestCheckpoint(t *testing.T) {

FILE: internal/cluster/balancer/balancer.go
  type Balancer (line 32) | type Balancer struct
    method isAlive (line 60) | func (b *Balancer) isAlive() bool {
    method scanPartition (line 70) | func (b *Balancer) scanPartition(sign uint64, part *partitions.Partiti...
    method primaryCopies (line 100) | func (b *Balancer) primaryCopies() {
    method breakLoop (line 128) | func (b *Balancer) breakLoop(sign uint64) bool {
    method backupCopies (line 142) | func (b *Balancer) backupCopies() {
    method triggerBalancer (line 187) | func (b *Balancer) triggerBalancer() {
    method BalanceEagerly (line 203) | func (b *Balancer) BalanceEagerly() {
    method balance (line 207) | func (b *Balancer) balance() {
    method Start (line 224) | func (b *Balancer) Start() error {
    method RegisterHandlers (line 230) | func (b *Balancer) RegisterHandlers() {}
    method Shutdown (line 232) | func (b *Balancer) Shutdown(ctx context.Context) error {
  function New (line 45) | func New(e *environment.Environment) *Balancer {

FILE: internal/cluster/balancer/balancer_test.go
  function newTestEnvironment (line 39) | func newTestEnvironment(c *config.Config) *environment.Environment {
  function newBalancerForTest (line 53) | func newBalancerForTest(e *environment.Environment) *Balancer {
  type mockCluster (line 69) | type mockCluster struct
    method addNode (line 86) | func (mc *mockCluster) addNode(e *environment.Environment) *Balancer {
    method shutdown (line 142) | func (mc *mockCluster) shutdown() {
  function newMockCluster (line 77) | func newMockCluster(t *testing.T) *mockCluster {
  function TestBalance_Primary_Move (line 147) | func TestBalance_Primary_Move(t *testing.T) {
  function checkBackupOwnership (line 193) | func checkBackupOwnership(e *environment.Environment) error {
  function TestBalance_Empty_Backup_Move (line 211) | func TestBalance_Empty_Backup_Move(t *testing.T) {
  function TestBalance_Backup_Move (line 244) | func TestBalance_Backup_Move(t *testing.T) {

FILE: internal/cluster/partitions/fragment.go
  type Fragment (line 22) | type Fragment interface

FILE: internal/cluster/partitions/hkey.go
  function SetHashFunc (line 29) | func SetHashFunc(h hasher.Hasher) {
  function HKey (line 35) | func HKey(name, key string) uint64 {

FILE: internal/cluster/partitions/hkey_test.go
  function TestPartitions_HKey (line 24) | func TestPartitions_HKey(t *testing.T) {

FILE: internal/cluster/partitions/partition.go
  type Partition (line 25) | type Partition struct
    method Kind (line 34) | func (p *Partition) Kind() Kind {
    method ID (line 38) | func (p *Partition) ID() uint64 {
    method Map (line 42) | func (p *Partition) Map() *sync.Map {
    method Owner (line 47) | func (p *Partition) Owner() discovery.Member {
    method OwnerCount (line 60) | func (p *Partition) OwnerCount() int {
    method Owners (line 69) | func (p *Partition) Owners() []discovery.Member {
    method SetOwners (line 77) | func (p *Partition) SetOwners(owners []discovery.Member) {
    method Length (line 81) | func (p *Partition) Length() int {

FILE: internal/cluster/partitions/partition_test.go
  type testFragment (line 26) | type testFragment struct
    method Stats (line 30) | func (tf *testFragment) Stats() storage.Stats {
    method Name (line 34) | func (tf *testFragment) Name() string {
    method Move (line 38) | func (tf *testFragment) Move(_ *Partition, _ string, _ []discovery.Mem...
    method Close (line 42) | func (tf *testFragment) Close() error {
    method Destroy (line 46) | func (tf *testFragment) Destroy() error {
    method Compaction (line 50) | func (tf *testFragment) Compaction() (bool, error) {
  function TestPartition (line 54) | func TestPartition(t *testing.T) {

FILE: internal/cluster/partitions/partitions.go
  type Kind (line 23) | type Kind
    method String (line 25) | func (k Kind) String() string {
  constant PRIMARY (line 37) | PRIMARY = Kind(iota + 1)
  constant BACKUP (line 38) | BACKUP
  type Partitions (line 41) | type Partitions struct
    method PartitionByID (line 64) | func (ps *Partitions) PartitionByID(partID uint64) *Partition {
    method PartitionIDByHKey (line 69) | func (ps *Partitions) PartitionIDByHKey(hkey uint64) uint64 {
    method PartitionByHKey (line 74) | func (ps *Partitions) PartitionByHKey(hkey uint64) *Partition {
    method PartitionOwnersByHKey (line 80) | func (ps *Partitions) PartitionOwnersByHKey(hkey uint64) []discovery.M...
    method PartitionOwnersByID (line 86) | func (ps *Partitions) PartitionOwnersByID(partID uint64) []discovery.M...
  function New (line 47) | func New(count uint64, kind Kind) *Partitions {

FILE: internal/cluster/partitions/partitions_test.go
  function TestPartitions (line 24) | func TestPartitions(t *testing.T) {

FILE: internal/cluster/routingtable/callback.go
  method AddCallback (line 17) | func (r *RoutingTable) AddCallback(f func()) {
  method runCallbacks (line 24) | func (r *RoutingTable) runCallbacks() {

FILE: internal/cluster/routingtable/callback_test.go
  function TestRoutingTable_Callback (line 25) | func TestRoutingTable_Callback(t *testing.T) {

FILE: internal/cluster/routingtable/discovery.go
  method bootstrapCoordinator (line 32) | func (r *RoutingTable) bootstrapCoordinator() error {
  method attemptToJoin (line 47) | func (r *RoutingTable) attemptToJoin() error {
  method tryWithInterval (line 77) | func (r *RoutingTable) tryWithInterval(ctx context.Context, interval tim...

FILE: internal/cluster/routingtable/discovery_test.go
  function TestRoutingTable_tryWithInterval (line 26) | func TestRoutingTable_tryWithInterval(t *testing.T) {
  function TestRoutingTable_attemptToJoin (line 43) | func TestRoutingTable_attemptToJoin(t *testing.T) {

FILE: internal/cluster/routingtable/distribute.go
  method distributePrimaryCopies (line 25) | func (r *RoutingTable) distributePrimaryCopies(partID uint64) []discover...
  method getReplicaOwners (line 99) | func (r *RoutingTable) getReplicaOwners(partID uint64) ([]consistent.Mem...
  function isOwner (line 114) | func isOwner(member discovery.Member, owners []consistent.Member) bool {
  method distributeBackups (line 123) | func (r *RoutingTable) distributeBackups(partID uint64) []discovery.Memb...

FILE: internal/cluster/routingtable/distribute_test.go
  function TestRoutingTable_distributedBackups (line 26) | func TestRoutingTable_distributedBackups(t *testing.T) {

FILE: internal/cluster/routingtable/events.go
  method publishNodeJoinEvent (line 24) | func (r *RoutingTable) publishNodeJoinEvent(m *discovery.Member) {
  method publishNodeLeftEvent (line 45) | func (r *RoutingTable) publishNodeLeftEvent(m *discovery.Member) {

FILE: internal/cluster/routingtable/events_test.go
  function TestRoutingTable_publishNodeJoinEvent (line 31) | func TestRoutingTable_publishNodeJoinEvent(t *testing.T) {
  function TestRoutingTable_publishNodeLeftEvent (line 64) | func TestRoutingTable_publishNodeLeftEvent(t *testing.T) {

FILE: internal/cluster/routingtable/handlers.go
  method RegisterHandlers (line 21) | func (r *RoutingTable) RegisterHandlers() {

FILE: internal/cluster/routingtable/left_over_data.go
  method processLeftOverDataReports (line 22) | func (r *RoutingTable) processLeftOverDataReports(reports map[discovery....

FILE: internal/cluster/routingtable/left_over_data_test.go
  function TestRoutingTable_LeftOverData (line 27) | func TestRoutingTable_LeftOverData(t *testing.T) {

FILE: internal/cluster/routingtable/members.go
  type Members (line 24) | type Members struct
    method Add (line 35) | func (m *Members) Add(member discovery.Member) {
    method Get (line 39) | func (m *Members) Get(id uint64) (discovery.Member, error) {
    method Delete (line 47) | func (m *Members) Delete(id uint64) {
    method DeleteByName (line 51) | func (m *Members) DeleteByName(other discovery.Member) {
    method Length (line 59) | func (m *Members) Length() int {
    method Range (line 63) | func (m *Members) Range(f func(id uint64, member discovery.Member) boo...
  function newMembers (line 29) | func newMembers() *Members {

FILE: internal/cluster/routingtable/members_test.go
  function TestMembers_Get (line 24) | func TestMembers_Get(t *testing.T) {
  function TestMembers_Delete (line 40) | func TestMembers_Delete(t *testing.T) {
  function TestMembers_DeleteByName (line 54) | func TestMembers_DeleteByName(t *testing.T) {
  function TestMembers_Length (line 68) | func TestMembers_Length(t *testing.T) {
  function TestMembers_Range (line 80) | func TestMembers_Range(t *testing.T) {

FILE: internal/cluster/routingtable/operations.go
  method lengthOfPartCommandHandler (line 27) | func (r *RoutingTable) lengthOfPartCommandHandler(conn redcon.Conn, cmd ...
  method verifyRoutingTable (line 47) | func (r *RoutingTable) verifyRoutingTable(id uint64, table map[uint64]*r...
  method updateRoutingCommandHandler (line 66) | func (r *RoutingTable) updateRoutingCommandHandler(conn redcon.Conn, cmd...

FILE: internal/cluster/routingtable/routingtable.go
  type route (line 41) | type route struct
  type RoutingTable (line 46) | type RoutingTable struct
    method Discovery (line 122) | func (r *RoutingTable) Discovery() *discovery.Discovery {
    method This (line 126) | func (r *RoutingTable) This() discovery.Member {
    method setNumMembers (line 131) | func (r *RoutingTable) setNumMembers() {
    method SetNumMembersEagerly (line 138) | func (r *RoutingTable) SetNumMembersEagerly(nr int32) {
    method NumMembers (line 142) | func (r *RoutingTable) NumMembers() int32 {
    method Members (line 146) | func (r *RoutingTable) Members() *Members {
    method setSignature (line 150) | func (r *RoutingTable) setSignature(s uint64) {
    method Signature (line 154) | func (r *RoutingTable) Signature() uint64 {
    method setOwnedPartitionCount (line 158) | func (r *RoutingTable) setOwnedPartitionCount() {
    method OwnedPartitionCount (line 169) | func (r *RoutingTable) OwnedPartitionCount() uint64 {
    method CheckMemberCountQuorum (line 173) | func (r *RoutingTable) CheckMemberCountQuorum() error {
    method markBootstrapped (line 182) | func (r *RoutingTable) markBootstrapped() {
    method IsBootstrapped (line 187) | func (r *RoutingTable) IsBootstrapped() bool {
    method CheckBootstrap (line 194) | func (r *RoutingTable) CheckBootstrap() error {
    method fillRoutingTable (line 212) | func (r *RoutingTable) fillRoutingTable() {
    method UpdateEagerly (line 231) | func (r *RoutingTable) UpdateEagerly() {
    method updateRouting (line 235) | func (r *RoutingTable) updateRouting() {
    method processClusterEvent (line 262) | func (r *RoutingTable) processClusterEvent(event *discovery.ClusterEve...
    method listenClusterEvents (line 321) | func (r *RoutingTable) listenClusterEvents(eventCh chan *discovery.Clu...
    method pushPeriodically (line 334) | func (r *RoutingTable) pushPeriodically() {
    method Join (line 349) | func (r *RoutingTable) Join() error {
    method Start (line 378) | func (r *RoutingTable) Start() error {
    method Shutdown (line 434) | func (r *RoutingTable) Shutdown(ctx context.Context) error {
  function registerErrors (line 81) | func registerErrors() {
  function New (line 88) | func New(e *environment.Environment) *RoutingTable {

FILE: internal/cluster/routingtable/routingtable_test.go
  function newRoutingTableForTest (line 36) | func newRoutingTableForTest(c *config.Config, srv *server.Server) *Routi...
  type testCluster (line 57) | type testCluster struct
    method addNode (line 72) | func (t *testCluster) addNode(c *config.Config) (*RoutingTable, error) {
    method shutdown (line 113) | func (t *testCluster) shutdown() error {
  function newTestCluster (line 64) | func newTestCluster() *testCluster {
  function TestRoutingTable_SingleNode (line 118) | func TestRoutingTable_SingleNode(t *testing.T) {
  function TestRoutingTable_Cluster (line 152) | func TestRoutingTable_Cluster(t *testing.T) {
  function TestRoutingTable_CheckPartitionOwnership (line 211) | func TestRoutingTable_CheckPartitionOwnership(t *testing.T) {
  function TestRoutingTable_NodeLeave (line 255) | func TestRoutingTable_NodeLeave(t *testing.T) {
  function TestRoutingTable_NodeUpdate (line 317) | func TestRoutingTable_NodeUpdate(t *testing.T) {

FILE: internal/cluster/routingtable/update.go
  type leftOverDataReport (line 29) | type leftOverDataReport struct
  method prepareLeftOverDataReport (line 34) | func (r *RoutingTable) prepareLeftOverDataReport() ([]byte, error) {
  method updateRoutingTableOnMember (line 50) | func (r *RoutingTable) updateRoutingTableOnMember(data []byte, member di...
  method updateRoutingTableOnCluster (line 72) | func (r *RoutingTable) updateRoutingTableOnCluster() (map[discovery.Memb...

FILE: internal/discovery/delegate.go
  type delegate (line 18) | type delegate struct
    method NodeMeta (line 36) | func (d delegate) NodeMeta(limit int) []byte {
    method NotifyMsg (line 41) | func (d delegate) NotifyMsg(data []byte) {}
    method GetBroadcasts (line 44) | func (d delegate) GetBroadcasts(overhead, limit int) [][]byte { return...
    method LocalState (line 47) | func (d delegate) LocalState(join bool) []byte { return nil }
    method MergeRemoteState (line 50) | func (d delegate) MergeRemoteState(buf []byte, join bool) {}
  method newDelegate (line 23) | func (d *Discovery) newDelegate() (delegate, error) {

FILE: internal/discovery/discovery.go
  constant eventChanCapacity (line 36) | eventChanCapacity = 256
  type ClusterEvent (line 46) | type ClusterEvent struct
    method MemberAddr (line 54) | func (c *ClusterEvent) MemberAddr() string {
  type Discovery (line 61) | type Discovery struct
    method loadServiceDiscoveryPlugin (line 95) | func (d *Discovery) loadServiceDiscoveryPlugin() error {
    method increaseUptimeSeconds (line 135) | func (d *Discovery) increaseUptimeSeconds() {
    method Start (line 151) | func (d *Discovery) Start() error {
    method Join (line 196) | func (d *Discovery) Join() (int, error) {
    method Rejoin (line 207) | func (d *Discovery) Rejoin(peers []string) (int, error) {
    method GetMembers (line 212) | func (d *Discovery) GetMembers() []Member {
    method NumMembers (line 227) | func (d *Discovery) NumMembers() int {
    method FindMemberByName (line 232) | func (d *Discovery) FindMemberByName(name string) (Member, error) {
    method FindMemberByID (line 243) | func (d *Discovery) FindMemberByID(id uint64) (Member, error) {
    method GetCoordinator (line 254) | func (d *Discovery) GetCoordinator() Member {
    method IsCoordinator (line 264) | func (d *Discovery) IsCoordinator() bool {
    method LocalNode (line 269) | func (d *Discovery) LocalNode() *memberlist.Node {
    method Shutdown (line 280) | func (d *Discovery) Shutdown() error {
  function New (line 82) | func New(log *flog.Logger, c *config.Config) *Discovery {

FILE: internal/discovery/discovery_test.go
  type testCluster (line 33) | type testCluster struct
    method addNewMember (line 52) | func (tc *testCluster) addNewMember(t *testing.T) *Discovery {
  function newTestCluster (line 39) | func newTestCluster(t *testing.T) *testCluster {
  function TestDiscovery_GetCoordinator (line 79) | func TestDiscovery_GetCoordinator(t *testing.T) {
  function TestDiscovery_GetMembers (line 87) | func TestDiscovery_GetMembers(t *testing.T) {
  function TestDiscovery_IsCoordinator (line 96) | func TestDiscovery_IsCoordinator(t *testing.T) {
  function TestDiscovery_NumMembers (line 111) | func TestDiscovery_NumMembers(t *testing.T) {
  function TestDiscovery_LocalNode (line 120) | func TestDiscovery_LocalNode(t *testing.T) {
  function TestDiscovery_FindMemberByID (line 127) | func TestDiscovery_FindMemberByID(t *testing.T) {
  function TestDiscovery_FindMemberByName (line 140) | func TestDiscovery_FindMemberByName(t *testing.T) {
  function TestDiscovery_increaseUptimeSeconds (line 153) | func TestDiscovery_increaseUptimeSeconds(t *testing.T) {
  type dummyServiceDiscovery (line 162) | type dummyServiceDiscovery struct
    method Initialize (line 175) | func (d *dummyServiceDiscovery) Initialize() error {
    method SetConfig (line 183) | func (d *dummyServiceDiscovery) SetConfig(_ map[string]interface{}) er...
    method SetLogger (line 191) | func (d *dummyServiceDiscovery) SetLogger(_ *log.Logger) {
    method Register (line 198) | func (d *dummyServiceDiscovery) Register() error {
    method Deregister (line 207) | func (d *dummyServiceDiscovery) Deregister() error {
    method DiscoverPeers (line 215) | func (d *dummyServiceDiscovery) DiscoverPeers() ([]string, error) {
    method Close (line 223) | func (d *dummyServiceDiscovery) Close() error {
  function TestDiscovery_loadServiceDiscoveryPlugin (line 233) | func TestDiscovery_loadServiceDiscoveryPlugin(t *testing.T) {
  function TestDiscovery_ClusterEvents (line 258) | func TestDiscovery_ClusterEvents(t *testing.T) {

FILE: internal/discovery/events.go
  function ToClusterEvent (line 19) | func ToClusterEvent(e memberlist.NodeEvent) *ClusterEvent {
  method handleEvent (line 29) | func (d *Discovery) handleEvent(event memberlist.NodeEvent) {
  method eventLoop (line 42) | func (d *Discovery) eventLoop(eventsCh chan memberlist.NodeEvent) {
  method SubscribeNodeEvents (line 55) | func (d *Discovery) SubscribeNodeEvents() chan *ClusterEvent {

FILE: internal/discovery/member.go
  type Member (line 27) | type Member struct
    method CompareByID (line 35) | func (m Member) CompareByID(other Member) bool {
    method CompareByName (line 42) | func (m Member) CompareByName(other Member) bool {
    method String (line 46) | func (m Member) String() string {
    method Encode (line 50) | func (m Member) Encode() ([]byte, error) {
  function NewMemberFromMetadata (line 54) | func NewMemberFromMetadata(metadata []byte) (Member, error) {
  function MemberID (line 60) | func MemberID(name string, birthdate int64) uint64 {
  function NewMember (line 68) | func NewMember(c *config.Config) Member {

FILE: internal/discovery/member_test.go
  function TestMembers (line 23) | func TestMembers(t *testing.T) {

FILE: internal/dmap/atomic.go
  method loadCurrentAtomicInt (line 29) | func (dm *DMap) loadCurrentAtomicInt(e *env) (int, int64, error) {
  method atomicIncrDecr (line 48) | func (dm *DMap) atomicIncrDecr(cmd string, e *env, delta int) (int, erro...
  method Incr (line 97) | func (dm *DMap) Incr(ctx context.Context, key string, delta int) (int, e...
  method Decr (line 105) | func (dm *DMap) Decr(ctx context.Context, key string, delta int) (int, e...
  method getPut (line 112) | func (dm *DMap) getPut(e *env) (storage.Entry, error) {
  method GetPut (line 141) | func (dm *DMap) GetPut(ctx context.Context, key string, value interface{...
  method atomicIncrByFloat (line 171) | func (dm *DMap) atomicIncrByFloat(e *env, delta float64) (float64, error) {
  method IncrByFloat (line 223) | func (dm *DMap) IncrByFloat(ctx context.Context, key string, delta float...

FILE: internal/dmap/atomic_handlers.go
  method incrDecrCommon (line 24) | func (s *Service) incrDecrCommon(cmd, dmap, key string, delta int) (int,...
  method incrCommandHandler (line 36) | func (s *Service) incrCommandHandler(conn redcon.Conn, cmd redcon.Comman...
  method decrCommandHandler (line 50) | func (s *Service) decrCommandHandler(conn redcon.Conn, cmd redcon.Comman...
  method getPutCommandHandler (line 64) | func (s *Service) getPutCommandHandler(conn redcon.Conn, cmd redcon.Comm...
  method incrByFloatCommandHandler (line 99) | func (s *Service) incrByFloatCommandHandler(conn redcon.Conn, cmd redcon...

FILE: internal/dmap/atomic_test.go
  function TestDMap_loadCurrentAtomicInt (line 33) | func TestDMap_loadCurrentAtomicInt(t *testing.T) {
  function TestDMap_Atomic_Incr (line 63) | func TestDMap_Atomic_Incr(t *testing.T) {
  function TestDMap_Atomic_Decr (line 104) | func TestDMap_Atomic_Decr(t *testing.T) {
  function TestDMap_Atomic_GetPut (line 146) | func TestDMap_Atomic_GetPut(t *testing.T) {
  function TestDMap_Atomic_IncrByFloat (line 196) | func TestDMap_Atomic_IncrByFloat(t *testing.T) {
  function TestDMap_incrCommandHandler (line 237) | func TestDMap_incrCommandHandler(t *testing.T) {
  function TestDMap_incrCommandHandler_Single_Request (line 270) | func TestDMap_incrCommandHandler_Single_Request(t *testing.T) {
  function TestDMap_decrCommandHandler (line 285) | func TestDMap_decrCommandHandler(t *testing.T) {
  function TestDMap_decrCommandHandler_Single_Request (line 318) | func TestDMap_decrCommandHandler_Single_Request(t *testing.T) {
  function TestDMap_exGetPutOperation (line 333) | func TestDMap_exGetPutOperation(t *testing.T) {
  function TestDMap_incrByFloatCommandHandler (line 403) | func TestDMap_incrByFloatCommandHandler(t *testing.T) {

FILE: internal/dmap/balance.go
  type fragmentPack (line 31) | type fragmentPack struct
  method fragmentMergeFunction (line 38) | func (dm *DMap) fragmentMergeFunction(f *fragment, hkey uint64, entry st...
  method mergeFragments (line 57) | func (dm *DMap) mergeFragments(part *partitions.Partition, fp *fragmentP...
  method checkOwnership (line 72) | func (s *Service) checkOwnership(part *partitions.Partition) bool {
  method validateFragmentPack (line 82) | func (s *Service) validateFragmentPack(fp *fragmentPack) error {
  method moveFragmentCommandHandler (line 103) | func (s *Service) moveFragmentCommandHandler(conn redcon.Conn, cmd redco...

FILE: internal/dmap/balance_test.go
  function TestDMap_Balance_Invalid_PartID (line 33) | func TestDMap_Balance_Invalid_PartID(t *testing.T) {
  function TestDMap_Balance_FragmentMergeFunction (line 48) | func TestDMap_Balance_FragmentMergeFunction(t *testing.T) {
  function TestDMap_Balancer_JoinNewNode (line 78) | func TestDMap_Balancer_JoinNewNode(t *testing.T) {
  function TestDMap_Balancer_WrongOwnership (line 116) | func TestDMap_Balancer_WrongOwnership(t *testing.T) {
  function TestDMap_Balancer_ClusterEvents (line 138) | func TestDMap_Balancer_ClusterEvents(t *testing.T) {

FILE: internal/dmap/compaction.go
  method callCompactionOnFragment (line 28) | func (s *Service) callCompactionOnFragment(f *fragment) bool {
  method doCompaction (line 52) | func (s *Service) doCompaction(partID uint64) {
  method triggerCompaction (line 72) | func (s *Service) triggerCompaction() {
  method compactionWorker (line 107) | func (s *Service) compactionWorker() {

FILE: internal/dmap/compaction_test.go
  function TestDMap_Compaction (line 32) | func TestDMap_Compaction(t *testing.T) {

FILE: internal/dmap/config.go
  type dmapConfig (line 25) | type dmapConfig struct
    method load (line 35) | func (c *dmapConfig) load(dc *config.DMaps, name string) error {

FILE: internal/dmap/config_test.go
  function TestDMap_Config (line 26) | func TestDMap_Config(t *testing.T) {

FILE: internal/dmap/delete.go
  method deleteFromFragment (line 36) | func (dm *DMap) deleteFromFragment(key string, kind partitions.Kind) err...
  method deleteFromPreviousOwners (line 54) | func (dm *DMap) deleteFromPreviousOwners(key string, owners []discovery....
  method deleteBackupOnCluster (line 72) | func (dm *DMap) deleteBackupOnCluster(hkey uint64, key string) error {
  method deleteOnCluster (line 92) | func (dm *DMap) deleteOnCluster(hkey uint64, key string, f *fragment) er...
  method deleteKey (line 121) | func (dm *DMap) deleteKey(key string) error {
  method deleteKeys (line 142) | func (dm *DMap) deleteKeys(ctx context.Context, keys ...string) (int, er...
  method Delete (line 174) | func (dm *DMap) Delete(ctx context.Context, keys ...string) (int, error) {

FILE: internal/dmap/delete_handlers.go
  method delCommandHandler (line 23) | func (s *Service) delCommandHandler(conn redcon.Conn, cmd redcon.Command) {
  method delEntryCommandHandler (line 44) | func (s *Service) delEntryCommandHandler(conn redcon.Conn, cmd redcon.Co...

FILE: internal/dmap/delete_test.go
  function checkEmptyStorageEngine (line 35) | func checkEmptyStorageEngine(t *testing.T, s *Service) {
  function TestDMap_Delete_Cluster (line 73) | func TestDMap_Delete_Cluster(t *testing.T) {
  function TestDMap_Delete_Lookup (line 100) | func TestDMap_Delete_Lookup(t *testing.T) {
  function TestDMap_Delete_StaleFragments (line 129) | func TestDMap_Delete_StaleFragments(t *testing.T) {
  function TestDMap_Delete_PreviousOwner (line 199) | func TestDMap_Delete_PreviousOwner(t *testing.T) {
  function TestDMap_Delete_DeleteKeyValFromPreviousOwners (line 222) | func TestDMap_Delete_DeleteKeyValFromPreviousOwners(t *testing.T) {
  function TestDMap_Delete_Backup (line 257) | func TestDMap_Delete_Backup(t *testing.T) {
  function TestDMap_Delete_Compaction (line 305) | func TestDMap_Delete_Compaction(t *testing.T) {

FILE: internal/dmap/destroy.go
  method destroyOnCluster (line 27) | func (dm *DMap) destroyOnCluster(ctx context.Context) error {
  method Destroy (line 72) | func (dm *DMap) Destroy(ctx context.Context) error {

FILE: internal/dmap/destroy_handlers.go
  method destroyFragmentOnPartition (line 26) | func (dm *DMap) destroyFragmentOnPartition(part *partitions.Partition) e...
  method destroyLocalDMap (line 38) | func (s *Service) destroyLocalDMap(name string) error {
  method destroyCommandHandler (line 72) | func (s *Service) destroyCommandHandler(conn redcon.Conn, cmd redcon.Com...

FILE: internal/dmap/destroy_test.go
  function TestDMap_Destroy_Standalone (line 27) | func TestDMap_Destroy_Standalone(t *testing.T) {
  function TestDMap_Destroy_Cluster (line 59) | func TestDMap_Destroy_Cluster(t *testing.T) {
  function TestDMap_Destroy_destroyOperation (line 99) | func TestDMap_Destroy_destroyOperation(t *testing.T) {

FILE: internal/dmap/dmap.go
  constant nilTimeout (line 26) | nilTimeout = 0 * time.Second
  type DMap (line 36) | type DMap struct
    method Name (line 45) | func (dm *DMap) Name() string {
    method getPartitionByHKey (line 115) | func (dm *DMap) getPartitionByHKey(hkey uint64, kind partitions.Kind) ...
  method getDMap (line 50) | func (s *Service) getDMap(name string) (*DMap, error) {
  method fragmentName (line 61) | func (s *Service) fragmentName(name string) string {
  method NewDMap (line 67) | func (s *Service) NewDMap(name string) (*DMap, error) {
  method getOrCreateDMap (line 107) | func (s *Service) getOrCreateDMap(name string) (*DMap, error) {
  function isKeyExpired (line 128) | func isKeyExpired(ttl int64) bool {

FILE: internal/dmap/dmap_test.go
  function TestDMap_Name (line 25) | func TestDMap_Name(t *testing.T) {

FILE: internal/dmap/env.go
  type env (line 24) | type env struct
  function newEnv (line 37) | func newEnv(ctx context.Context) *env {

FILE: internal/dmap/eviction.go
  method isKeyIdleOnFragment (line 32) | func (dm *DMap) isKeyIdleOnFragment(hkey uint64, f *fragment) bool {
  method isKeyIdle (line 53) | func (dm *DMap) isKeyIdle(hkey uint64) bool {
  method evictKeysAtBackground (line 69) | func (s *Service) evictKeysAtBackground() {
  method evictKeys (line 102) | func (s *Service) evictKeys() {
  method scanFragmentForEviction (line 114) | func (s *Service) scanFragmentForEviction(partID uint64, name string, f ...
  type lruItem (line 202) | type lruItem struct
  method evictKeyWithLRU (line 207) | func (dm *DMap) evictKeyWithLRU(e *env) error {

FILE: internal/dmap/eviction_test.go
  function TestDMap_Eviction_TTL (line 28) | func TestDMap_Eviction_TTL(t *testing.T) {
  function TestDMap_Eviction_Config_TTLDuration (line 67) | func TestDMap_Eviction_Config_TTLDuration(t *testing.T) {
  function TestDMap_Eviction_Config_MaxIdleDuration (line 106) | func TestDMap_Eviction_Config_MaxIdleDuration(t *testing.T) {
  function TestDMap_Eviction_LRU_Config_MaxKeys (line 146) | func TestDMap_Eviction_LRU_Config_MaxKeys(t *testing.T) {
  function TestDMap_Eviction_LRU_Config_MaxInuse (line 181) | func TestDMap_Eviction_LRU_Config_MaxInuse(t *testing.T) {

FILE: internal/dmap/expire.go
  method Expire (line 24) | func (dm *DMap) Expire(ctx context.Context, key string, timeout time.Dur...

FILE: internal/dmap/expire_handlers.go
  method expireCommandHandler (line 22) | func (s *Service) expireCommandHandler(conn redcon.Conn, cmd redcon.Comm...
  method pexpireCommandHandler (line 52) | func (s *Service) pexpireCommandHandler(conn redcon.Conn, cmd redcon.Com...

FILE: internal/dmap/expire_test.go
  function TestDMap_Expire (line 27) | func TestDMap_Expire(t *testing.T) {
  function TestDMap_Expire_ErrKeyNotFound (line 53) | func TestDMap_Expire_ErrKeyNotFound(t *testing.T) {
  function TestDMap_Expire_expireCommandHandler (line 65) | func TestDMap_Expire_expireCommandHandler(t *testing.T) {
  function TestDMap_Expire_pexpireCommandHandler (line 90) | func TestDMap_Expire_pexpireCommandHandler(t *testing.T) {

FILE: internal/dmap/fragment.go
  type fragment (line 32) | type fragment struct
    method Stats (line 41) | func (f *fragment) Stats() storage.Stats {
    method Compaction (line 48) | func (f *fragment) Compaction() (bool, error) {
    method Destroy (line 58) | func (f *fragment) Destroy() error {
    method Close (line 67) | func (f *fragment) Close() error {
    method Name (line 72) | func (f *fragment) Name() string {
    method Move (line 76) | func (f *fragment) Move(part *partitions.Partition, name string, owner...
  method newFragment (line 131) | func (dm *DMap) newFragment() (*fragment, error) {
  method loadOrCreateFragment (line 153) | func (dm *DMap) loadOrCreateFragment(part *partitions.Partition) (*fragm...
  method loadFragment (line 173) | func (dm *DMap) loadFragment(part *partitions.Partition) (*fragment, err...

FILE: internal/dmap/fragment_test.go
  function TestDMap_Fragment (line 27) | func TestDMap_Fragment(t *testing.T) {
  function TestDMap_Fragment_Concurrent_Access (line 72) | func TestDMap_Fragment_Concurrent_Access(t *testing.T) {

FILE: internal/dmap/get.go
  type Entry (line 32) | type Entry struct
  type version (line 53) | type version struct
  method getOnFragment (line 60) | func (dm *DMap) getOnFragment(e *env) (storage.Entry, error) {
  method lookupOnPreviousOwner (line 89) | func (dm *DMap) lookupOnPreviousOwner(owner *discovery.Member, key strin...
  method valueToVersion (line 108) | func (dm *DMap) valueToVersion(value storage.Entry) *version {
  method lookupOnThisNode (line 118) | func (dm *DMap) lookupOnThisNode(hkey uint64, key string) *version {
  method lookupOnOwners (line 151) | func (dm *DMap) lookupOnOwners(hkey uint64, key string) []*version {
  method sortVersions (line 194) | func (dm *DMap) sortVersions(versions []*version) []*version {
  method sanitizeAndSortVersions (line 206) | func (dm *DMap) sanitizeAndSortVersions(versions []*version) []*version {
  method lookupOnReplicas (line 222) | func (dm *DMap) lookupOnReplicas(hkey uint64, key string) []*version {
  method readRepair (line 274) | func (dm *DMap) readRepair(winner *version, versions []*version) {
  method getOnCluster (line 330) | func (dm *DMap) getOnCluster(hkey uint64, key string) (storage.Entry, er...
  method Get (line 371) | func (dm *DMap) Get(ctx context.Context, key string) (storage.Entry, err...

FILE: internal/dmap/get_handlers.go
  method getCommandHandler (line 23) | func (s *Service) getCommandHandler(conn redcon.Conn, cmd redcon.Command) {
  method getEntryCommandHandler (line 48) | func (s *Service) getEntryCommandHandler(conn redcon.Conn, cmd redcon.Co...

FILE: internal/dmap/get_test.go
  function TestDMap_Get_Standalone (line 27) | func TestDMap_Get_Standalone(t *testing.T) {
  function TestDMap_Get_Cluster (line 49) | func TestDMap_Get_Cluster(t *testing.T) {
  function TestDMap_Get_Lookup (line 75) | func TestDMap_Get_Lookup(t *testing.T) {
  function TestDMap_Get_NilValue (line 104) | func TestDMap_Get_NilValue(t *testing.T) {
  function TestDMap_Get_NilValue_Cluster (line 137) | func TestDMap_Get_NilValue_Cluster(t *testing.T) {
  function TestDMap_Put_ReadQuorum (line 166) | func TestDMap_Put_ReadQuorum(t *testing.T) {
  function TestDMap_Get_ReadRepair (line 187) | func TestDMap_Get_ReadRepair(t *testing.T) {

FILE: internal/dmap/handlers.go
  method RegisterHandlers (line 21) | func (s *Service) RegisterHandlers() {

FILE: internal/dmap/janitor.go
  function wipeOutFragment (line 24) | func wipeOutFragment(part *partitions.Partition, name string, f *fragmen...
  method janitor (line 40) | func (s *Service) janitor(part *partitions.Partition) {
  method deleteEmptyFragments (line 70) | func (s *Service) deleteEmptyFragments() {
  method janitorWorker (line 82) | func (s *Service) janitorWorker() {

FILE: internal/dmap/lock.go
  method unlockKey (line 39) | func (dm *DMap) unlockKey(ctx context.Context, key string, token []byte)...
  method Unlock (line 74) | func (dm *DMap) Unlock(ctx context.Context, key string, token []byte) er...
  method tryLock (line 93) | func (dm *DMap) tryLock(e *env, deadline time.Duration) error {
  method Lock (line 138) | func (dm *DMap) Lock(ctx context.Context, key string, timeout, deadline ...
  method leaseKey (line 166) | func (dm *DMap) leaseKey(ctx context.Context, key string, token []byte, ...
  method Lease (line 207) | func (dm *DMap) Lease(ctx context.Context, key string, token []byte, tim...

FILE: internal/dmap/lock_handlers.go
  method unlockCommandHandler (line 25) | func (s *Service) unlockCommandHandler(conn redcon.Conn, cmd redcon.Comm...
  method lockCommandHandler (line 52) | func (s *Service) lockCommandHandler(conn redcon.Conn, cmd redcon.Comman...
  method lockLeaseCommandHandler (line 83) | func (s *Service) lockLeaseCommandHandler(conn redcon.Conn, cmd redcon.C...
  method plockLeaseCommandHandler (line 110) | func (s *Service) plockLeaseCommandHandler(conn redcon.Conn, cmd redcon....

FILE: internal/dmap/lock_test.go
  function TestDMap_Lock_With_Timeout_Standalone (line 29) | func TestDMap_Lock_With_Timeout_Standalone(t *testing.T) {
  function TestDMap_Unlock_After_Timeout_Standalone (line 46) | func TestDMap_Unlock_After_Timeout_Standalone(t *testing.T) {
  function TestDMap_Lock_With_Timeout_ErrLockNotAcquired_Standalone (line 65) | func TestDMap_Lock_With_Timeout_ErrLockNotAcquired_Standalone(t *testing...
  function TestDMap_LockLease_Standalone (line 82) | func TestDMap_LockLease_Standalone(t *testing.T) {
  function TestDMap_Lock_Standalone (line 111) | func TestDMap_Lock_Standalone(t *testing.T) {
  function TestDMap_Lock_ErrLockNotAcquired_Standalone (line 128) | func TestDMap_Lock_ErrLockNotAcquired_Standalone(t *testing.T) {
  function TestDMap_LockWithTimeout_Cluster (line 145) | func TestDMap_LockWithTimeout_Cluster(t *testing.T) {
  function TestDMap_LockLease_Cluster (line 169) | func TestDMap_LockLease_Cluster(t *testing.T) {
  function TestDMap_Lock_Cluster (line 193) | func TestDMap_Lock_Cluster(t *testing.T) {
  function TestDMap_LockWithTimeout_ErrLockNotAcquired_Cluster (line 217) | func TestDMap_LockWithTimeout_ErrLockNotAcquired_Cluster(t *testing.T) {
  function TestDMap_Lock_After_Lock_With_Timeout_Cluster (line 241) | func TestDMap_Lock_After_Lock_With_Timeout_Cluster(t *testing.T) {
  function TestDMap_tryLock (line 264) | func TestDMap_tryLock(t *testing.T) {
  function TestDMap_lockCommandHandler (line 295) | func TestDMap_lockCommandHandler(t *testing.T) {
  function TestDMap_lockCommandHandler_EX (line 316) | func TestDMap_lockCommandHandler_EX(t *testing.T) {
  function TestDMap_lockCommandHandler_PX (line 337) | func TestDMap_lockCommandHandler_PX(t *testing.T) {
  function TestDMap_lockLeaseCommandHandler (line 359) | func TestDMap_lockLeaseCommandHandler(t *testing.T) {
  function TestDMap_plockLeaseCommandHandler (line 386) | func TestDMap_plockLeaseCommandHandler(t *testing.T) {

FILE: internal/dmap/put.go
  function prepareTTL (line 47) | func prepareTTL(e *env) int64 {
  method putEntryOnFragment (line 68) | func (dm *DMap) putEntryOnFragment(e *env, nt storage.Entry) error {
  method prepareEntry (line 96) | func (dm *DMap) prepareEntry(e *env) storage.Entry {
  method putOnReplicaFragment (line 105) | func (dm *DMap) putOnReplicaFragment(e *env) error {
  method asyncPutOnBackup (line 133) | func (dm *DMap) asyncPutOnBackup(e *env, data []byte, owner discovery.Me...
  method asyncPutOnCluster (line 153) | func (dm *DMap) asyncPutOnCluster(e *env, nt storage.Entry) error {
  method syncPutOnCluster (line 174) | func (dm *DMap) syncPutOnCluster(e *env, nt storage.Entry) error {
  method setLRUEvictionStats (line 211) | func (dm *DMap) setLRUEvictionStats(e *env) error {
  method checkPutConditions (line 257) | func (dm *DMap) checkPutConditions(e *env) error {
  method putOnCluster (line 292) | func (dm *DMap) putOnCluster(e *env) error {
  method writePutCommand (line 337) | func (dm *DMap) writePutCommand(e *env) (*redis.StatusCmd, error) {
  method put (line 362) | func (dm *DMap) put(e *env) error {
  type PutConfig (line 383) | type PutConfig struct
  method Put (line 401) | func (dm *DMap) Put(ctx context.Context, key string, value interface{}, ...

FILE: internal/dmap/put_handlers.go
  method putCommandHandler (line 25) | func (s *Service) putCommandHandler(conn redcon.Conn, cmd redcon.Command) {
  method putEntryCommandHandler (line 73) | func (s *Service) putEntryCommandHandler(conn redcon.Conn, cmd redcon.Co...

FILE: internal/dmap/put_test.go
  function TestDMap_Put_Standalone (line 33) | func TestDMap_Put_Standalone(t *testing.T) {
  function TestDMap_Put_Cluster (line 54) | func TestDMap_Put_Cluster(t *testing.T) {
  function TestDMap_Put_AsyncReplicationMode (line 80) | func TestDMap_Put_AsyncReplicationMode(t *testing.T) {
  function TestDMap_Put_WriteQuorum (line 117) | func TestDMap_Put_WriteQuorum(t *testing.T) {
  function TestDMap_Put_PX (line 150) | func TestDMap_Put_PX(t *testing.T) {
  function TestDMap_Put_NX (line 182) | func TestDMap_Put_NX(t *testing.T) {
  function TestDMap_Put_XX (line 214) | func TestDMap_Put_XX(t *testing.T) {
  function TestDMap_Put_EX (line 242) | func TestDMap_Put_EX(t *testing.T) {
  function TestDMap_Put_EXAT (line 274) | func TestDMap_Put_EXAT(t *testing.T) {
  function TestDMap_Put_PXAT (line 304) | func TestDMap_Put_PXAT(t *testing.T) {
  function TestDMap_Put_ErrKeyTooLarge (line 334) | func TestDMap_Put_ErrKeyTooLarge(t *testing.T) {
  function TestDMap_Put_ErrEntryTooLarge (line 351) | func TestDMap_Put_ErrEntryTooLarge(t *testing.T) {
  function TestDMap_Put_PX_With_NX (line 368) | func TestDMap_Put_PX_With_NX(t *testing.T) {

FILE: internal/dmap/scan_handlers.go
  method scanOnFragment (line 26) | func (dm *DMap) scanOnFragment(f *fragment, cursor uint64, sc *ScanConfi...
  method Scan (line 54) | func (dm *DMap) Scan(partID, cursor uint64, sc *ScanConfig) ([]string, u...
  type ScanConfig (line 71) | type ScanConfig struct
  type ScanOption (line 79) | type ScanOption
  function Count (line 81) | func Count(c int) ScanOption {
  function Match (line 88) | func Match(s string) ScanOption {
  method scanCommandHandler (line 95) | func (s *Service) scanCommandHandler(conn redcon.Conn, cmd redcon.Comman...

FILE: internal/dmap/scan_test.go
  function testScanIterator (line 28) | func testScanIterator(t *testing.T, s *Service, allKeys map[string]bool,...
  function TestDMap_scanCommandHandler_Standalone (line 70) | func TestDMap_scanCommandHandler_Standalone(t *testing.T) {
  function TestDMap_scanCommandHandler_Cluster (line 95) | func TestDMap_scanCommandHandler_Cluster(t *testing.T) {
  function TestDMap_scanCommandHandler_match (line 148) | func TestDMap_scanCommandHandler_match(t *testing.T) {
  function TestDMap_scanCommandHandler_count (line 181) | func TestDMap_scanCommandHandler_count(t *testing.T) {

FILE: internal/dmap/service.go
  type storageMap (line 38) | type storageMap struct
  type Service (line 43) | type Service struct
    method isAlive (line 97) | func (s *Service) isAlive() bool {
    method publishEvent (line 115) | func (s *Service) publishEvent(e events.Event) {
    method Start (line 132) | func (s *Service) Start() error {
    method Shutdown (line 145) | func (s *Service) Shutdown(ctx context.Context) error {
  function registerErrors (line 61) | func registerErrors() {
  function NewService (line 73) | func NewService(e *environment.Environment) (service.Service, error) {
  function getType (line 107) | func getType(data interface{}) string {

FILE: internal/dmap/service_test.go
  function TestDMapService (line 24) | func TestDMapService(t *testing.T) {

FILE: internal/dmap/stats_test.go
  function TestDMap_Stats (line 27) | func TestDMap_Stats(t *testing.T) {

FILE: internal/environment/environment.go
  type Environment (line 19) | type Environment struct
    method Get (line 31) | func (e *Environment) Get(key string) interface{} {
    method Set (line 42) | func (e *Environment) Set(key string, value interface{}) {
    method Clone (line 49) | func (e *Environment) Clone() *Environment {
  function New (line 25) | func New() *Environment {

FILE: internal/environment/environment_test.go
  type envTest (line 23) | type envTest struct
  function TestEnvironment (line 27) | func TestEnvironment(t *testing.T) {

FILE: internal/locker/locker.go
  type Locker (line 26) | type Locker struct
    method Lock (line 72) | func (l *Locker) Lock(name string) {
    method Unlock (line 97) | func (l *Locker) Unlock(name string) error {
  type lockCtr (line 32) | type lockCtr struct
    method inc (line 40) | func (l *lockCtr) inc() {
    method dec (line 45) | func (l *lockCtr) dec() {
    method count (line 50) | func (l *lockCtr) count() int32 {
    method Lock (line 55) | func (l *lockCtr) Lock() {
    method Unlock (line 60) | func (l *lockCtr) Unlock() {
  function New (line 65) | func New() *Locker {

FILE: internal/locker/locker_test.go
  function TestLockCounter (line 11) | func TestLockCounter(t *testing.T) {
  function TestLockerLock (line 25) | func TestLockerLock(t *testing.T) {
  function TestLockerUnlock (line 77) | func TestLockerUnlock(t *testing.T) {
  function TestLockerConcurrency (line 96) | func TestLockerConcurrency(t *testing.T) {
  function BenchmarkLocker (line 128) | func BenchmarkLocker(b *testing.B) {
  function BenchmarkLockerParallel (line 136) | func BenchmarkLockerParallel(b *testing.B) {
  function BenchmarkLockerMoreKeys (line 147) | func BenchmarkLockerMoreKeys(b *testing.B) {

FILE: internal/protocol/cluster.go
  type ClusterRoutingTable (line 24) | type ClusterRoutingTable struct
    method Command (line 30) | func (c *ClusterRoutingTable) Command(ctx context.Context) *redis.Cmd {
  function NewClusterRoutingTable (line 26) | func NewClusterRoutingTable() *ClusterRoutingTable {
  function ParseClusterRoutingTable (line 36) | func ParseClusterRoutingTable(cmd redcon.Command) (*ClusterRoutingTable,...
  type ClusterMembers (line 45) | type ClusterMembers struct
    method Command (line 51) | func (c *ClusterMembers) Command(ctx context.Context) *redis.Cmd {
  function NewClusterMembers (line 47) | func NewClusterMembers() *ClusterMembers {
  function ParseClusterMembers (line 57) | func ParseClusterMembers(cmd redcon.Command) (*ClusterMembers, error) {

FILE: internal/protocol/cluster_test.go
  function TestProtocol_ClusterRoutingTable (line 24) | func TestProtocol_ClusterRoutingTable(t *testing.T) {
  function TestProtocol_ClusterMembers (line 38) | func TestProtocol_ClusterMembers(t *testing.T) {

FILE: internal/protocol/commands.go
  constant StatusOK (line 17) | StatusOK = "OK"
  type ClusterCommands (line 19) | type ClusterCommands struct
  type InternalCommands (line 29) | type InternalCommands struct
  type GenericCommands (line 42) | type GenericCommands struct
  type DMapCommands (line 54) | type DMapCommands struct
  type PubSubCommands (line 97) | type PubSubCommands struct

FILE: internal/protocol/dmap.go
  type Put (line 30) | type Put struct
    method SetEX (line 50) | func (p *Put) SetEX(ex float64) *Put {
    method SetPX (line 55) | func (p *Put) SetPX(px int64) *Put {
    method SetEXAT (line 60) | func (p *Put) SetEXAT(exat float64) *Put {
    method SetPXAT (line 65) | func (p *Put) SetPXAT(pxat int64) *Put {
    method SetNX (line 70) | func (p *Put) SetNX() *Put {
    method SetXX (line 75) | func (p *Put) SetXX() *Put {
    method Command (line 80) | func (p *Put) Command(ctx context.Context) *redis.StatusCmd {
  function NewPut (line 42) | func NewPut(dmap, key string, value []byte) *Put {
  function ParsePutCommand (line 118) | func ParsePutCommand(cmd redcon.Command) (*Put, error) {
  type PutEntry (line 180) | type PutEntry struct
    method Command (line 194) | func (p *PutEntry) Command(ctx context.Context) *redis.StatusCmd {
  function NewPutEntry (line 186) | func NewPutEntry(dmap, key string, value []byte) *PutEntry {
  function ParsePutEntryCommand (line 203) | func ParsePutEntryCommand(cmd redcon.Command) (*PutEntry, error) {
  type Get (line 215) | type Get struct
    method SetRaw (line 228) | func (g *Get) SetRaw() *Get {
    method Command (line 233) | func (g *Get) Command(ctx context.Context) *redis.StringCmd {
  function NewGet (line 221) | func NewGet(dmap, key string) *Get {
  function ParseGetCommand (line 244) | func ParseGetCommand(cmd redcon.Command) (*Get, error) {
  type GetEntry (line 266) | type GetEntry struct
    method SetReplica (line 279) | func (g *GetEntry) SetReplica() *GetEntry {
    method Command (line 284) | func (g *GetEntry) Command(ctx context.Context) *redis.StringCmd {
  function NewGetEntry (line 272) | func NewGetEntry(dmap, key string) *GetEntry {
  function ParseGetEntryCommand (line 295) | func ParseGetEntryCommand(cmd redcon.Command) (*GetEntry, error) {
  type Del (line 317) | type Del struct
    method Command (line 329) | func (d *Del) Command(ctx context.Context) *redis.IntCmd {
  function NewDel (line 322) | func NewDel(dmap string, keys ...string) *Del {
  function ParseDelCommand (line 339) | func ParseDelCommand(cmd redcon.Command) (*Del, error) {
  type DelEntry (line 353) | type DelEntry struct
    method SetReplica (line 364) | func (d *DelEntry) SetReplica() *DelEntry {
    method Command (line 369) | func (d *DelEntry) Command(ctx context.Context) *redis.IntCmd {
  function NewDelEntry (line 358) | func NewDelEntry(dmap, key string) *DelEntry {
  function ParseDelEntryCommand (line 379) | func ParseDelEntryCommand(cmd redcon.Command) (*DelEntry, error) {
  type PExpire (line 401) | type PExpire struct
    method Command (line 415) | func (p *PExpire) Command(ctx context.Context) *redis.StatusCmd {
  function NewPExpire (line 407) | func NewPExpire(dmap, key string, milliseconds time.Duration) *PExpire {
  function ParsePExpireCommand (line 424) | func ParsePExpireCommand(cmd redcon.Command) (*PExpire, error) {
  type Expire (line 442) | type Expire struct
    method Command (line 456) | func (e *Expire) Command(ctx context.Context) *redis.StatusCmd {
  function NewExpire (line 448) | func NewExpire(dmap, key string, seconds time.Duration) *Expire {
  function ParseExpireCommand (line 465) | func ParseExpireCommand(cmd redcon.Command) (*Expire, error) {
  type Destroy (line 483) | type Destroy struct
    method SetLocal (line 494) | func (d *Destroy) SetLocal() *Destroy {
    method Command (line 499) | func (d *Destroy) Command(ctx context.Context) *redis.StatusCmd {
  function NewDestroy (line 488) | func NewDestroy(dmap string) *Destroy {
  function ParseDestroyCommand (line 509) | func ParseDestroyCommand(cmd redcon.Command) (*Destroy, error) {
  type Scan (line 530) | type Scan struct
    method SetMatch (line 547) | func (s *Scan) SetMatch(match string) *Scan {
    method SetCount (line 552) | func (s *Scan) SetCount(count int) *Scan {
    method SetReplica (line 557) | func (s *Scan) SetReplica() *Scan {
    method Command (line 562) | func (s *Scan) Command(ctx context.Context) *redis.ScanCmd {
  function NewScan (line 539) | func NewScan(partID uint64, dmap string, cursor uint64) *Scan {
  constant DefaultScanCount (line 582) | DefaultScanCount = 10
  function ParseScanCommand (line 584) | func ParseScanCommand(cmd redcon.Command) (*Scan, error) {
  type Incr (line 635) | type Incr struct
    method Command (line 649) | func (i *Incr) Command(ctx context.Context) *redis.IntCmd {
  function NewIncr (line 641) | func NewIncr(dmap, key string, delta int) *Incr {
  function ParseIncrCommand (line 658) | func ParseIncrCommand(cmd redcon.Command) (*Incr, error) {
  type Decr (line 675) | type Decr struct
    method Command (line 685) | func (d *Decr) Command(ctx context.Context) *redis.IntCmd {
  function NewDecr (line 679) | func NewDecr(dmap, key string, delta int) *Decr {
  function ParseDecrCommand (line 691) | func ParseDecrCommand(cmd redcon.Command) (*Decr, error) {
  type GetPut (line 708) | type GetPut struct
    method SetRaw (line 723) | func (g *GetPut) SetRaw() *GetPut {
    method Command (line 728) | func (g *GetPut) Command(ctx context.Context) *redis.StringCmd {
  function NewGetPut (line 715) | func NewGetPut(dmap, key string, value []byte) *GetPut {
  function ParseGetPutCommand (line 740) | func ParseGetPutCommand(cmd redcon.Command) (*GetPut, error) {
  type IncrByFloat (line 762) | type IncrByFloat struct
    method Command (line 776) | func (i *IncrByFloat) Command(ctx context.Context) *redis.FloatCmd {
  function NewIncrByFloat (line 768) | func NewIncrByFloat(dmap, key string, delta float64) *IncrByFloat {
  function ParseIncrByFloatCommand (line 785) | func ParseIncrByFloatCommand(cmd redcon.Command) (*IncrByFloat, error) {
  type Lock (line 802) | type Lock struct
    method SetEX (line 818) | func (l *Lock) SetEX(ex float64) *Lock {
    method SetPX (line 823) | func (l *Lock) SetPX(px int64) *Lock {
    method Command (line 828) | func (l *Lock) Command(ctx context.Context) *redis.StringCmd {
  function NewLock (line 810) | func NewLock(dmap, key string, deadline float64) *Lock {
  function ParseLockCommand (line 849) | func ParseLockCommand(cmd redcon.Command) (*Lock, error) {
  type Unlock (line 892) | type Unlock struct
    method Command (line 906) | func (u *Unlock) Command(ctx context.Context) *redis.StatusCmd {
  function NewUnlock (line 898) | func NewUnlock(dmap, key, token string) *Unlock {
  function ParseUnlockCommand (line 915) | func ParseUnlockCommand(cmd redcon.Command) (*Unlock, error) {
  type LockLease (line 927) | type LockLease struct
    method Command (line 943) | func (l *LockLease) Command(ctx context.Context) *redis.StatusCmd {
  function NewLockLease (line 934) | func NewLockLease(dmap, key, token string, timeout float64) *LockLease {
  function ParseLockLeaseCommand (line 953) | func ParseLockLeaseCommand(cmd redcon.Command) (*LockLease, error) {
  type PLockLease (line 971) | type PLockLease struct
    method Command (line 987) | func (p *PLockLease) Command(ctx context.Context) *redis.StatusCmd {
  function NewPLockLease (line 978) | func NewPLockLease(dmap, key, token string, timeout int64) *PLockLease {
  function ParsePLockLeaseCommand (line 997) | func ParsePLockLeaseCommand(cmd redcon.Command) (*PLockLease, error) {

FILE: internal/protocol/dmap_test.go
  function stringToCommand (line 27) | func stringToCommand(s string) redcon.Command {
  function TestProtocol_ParsePutCommand_EX (line 43) | func TestProtocol_ParsePutCommand_EX(t *testing.T) {
  function TestProtocol_ParsePutCommand_PX (line 57) | func TestProtocol_ParsePutCommand_PX(t *testing.T) {
  function TestProtocol_ParsePutCommand_NX (line 71) | func TestProtocol_ParsePutCommand_NX(t *testing.T) {
  function TestProtocol_ParsePutCommand_XX (line 86) | func TestProtocol_ParsePutCommand_XX(t *testing.T) {
  function TestProtocol_ParsePutCommand_EXAT (line 101) | func TestProtocol_ParsePutCommand_EXAT(t *testing.T) {
  function TestProtocol_ParsePutCommand_PXAT (line 116) | func TestProtocol_ParsePutCommand_PXAT(t *testing.T) {
  function TestProtocol_ParseScanCommand (line 131) | func TestProtocol_ParseScanCommand(t *testing.T) {
  function TestProtocol_ParseScanCommand_Replica (line 145) | func TestProtocol_ParseScanCommand_Replica(t *testing.T) {
  function TestProtocol_ParseScanCommand_Match (line 159) | func TestProtocol_ParseScanCommand_Match(t *testing.T) {
  function TestProtocol_ParseScanCommand_PartID (line 174) | func TestProtocol_ParseScanCommand_PartID(t *testing.T) {
  function TestProtocol_ParseScanCommand_Match_Count (line 189) | func TestProtocol_ParseScanCommand_Match_Count(t *testing.T) {
  function TestProtocol_ParseScanCommand_Match_Count_Replica (line 204) | func TestProtocol_ParseScanCommand_Match_Count_Replica(t *testing.T) {
  function TestProtocol_PutEntry (line 222) | func TestProtocol_PutEntry(t *testing.T) {
  function TestProtocol_Get (line 234) | func TestProtocol_Get(t *testing.T) {
  function TestProtocol_Get_RW (line 246) | func TestProtocol_Get_RW(t *testing.T) {
  function TestProtocol_GetEntry (line 259) | func TestProtocol_GetEntry(t *testing.T) {
  function TestProtocol_GetEntry_RC (line 271) | func TestProtocol_GetEntry_RC(t *testing.T) {
  function TestProtocol_Del (line 284) | func TestProtocol_Del(t *testing.T) {
  function TestProtocol_DelEntry (line 295) | func TestProtocol_DelEntry(t *testing.T) {
  function TestProtocol_DelEntry_RC (line 307) | func TestProtocol_DelEntry_RC(t *testing.T) {
  function TestProtocol_PExpire (line 320) | func TestProtocol_PExpire(t *testing.T) {
  function TestProtocol_Expire (line 332) | func TestProtocol_Expire(t *testing.T) {
  function TestProtocol_Destroy (line 344) | func TestProtocol_Destroy(t *testing.T) {
  function TestProtocol_Destroy_Local (line 355) | func TestProtocol_Destroy_Local(t *testing.T) {
  function TestProtocol_Incr (line 367) | func TestProtocol_Incr(t *testing.T) {
  function TestProtocol_Decr (line 379) | func TestProtocol_Decr(t *testing.T) {
  function TestProtocol_GetPut (line 391) | func TestProtocol_GetPut(t *testing.T) {
  function TestProtocol_GetPut_RW (line 404) | func TestProtocol_GetPut_RW(t *testing.T) {
  function TestProtocol_IncrByFloat (line 418) | func TestProtocol_IncrByFloat(t *testing.T) {
  function TestProtocol_Lock (line 430) | func TestProtocol_Lock(t *testing.T) {
  function TestProtocol_Lock_EX (line 442) | func TestProtocol_Lock_EX(t *testing.T) {
  function TestProtocol_Lock_PX (line 457) | func TestProtocol_Lock_PX(t *testing.T) {
  function TestProtocol_Unlock (line 472) | func TestProtocol_Unlock(t *testing.T) {
  function TestProtocol_LockLease (line 484) | func TestProtocol_LockLease(t *testing.T) {
  function TestProtocol_PLockLease (line 498) | func TestProtocol_PLockLease(t *testing.T) {
  function TestProtocol_Scan (line 512) | func TestProtocol_Scan(t *testing.T) {
  function TestProtocol_Scan_Count_Match_Replica (line 527) | func TestProtocol_Scan_Count_Match_Replica(t *testing.T) {

FILE: internal/protocol/errors.go
  function init (line 39) | func init() {
  function SetError (line 43) | func SetError(prefix string, err error) {
  function GetError (line 55) | func GetError(prefix string) error {
  function getPrefix (line 62) | func getPrefix(err error) string {
  function GetPrefix (line 70) | func GetPrefix(err error) string {
  function ConvertError (line 81) | func ConvertError(err error) error {
  function WriteError (line 98) | func WriteError(conn redcon.Conn, err error) {
  function errWrongNumber (line 103) | func errWrongNumber(args [][]byte) error {

FILE: internal/protocol/errors_test.go
  function TestProtocol_errWrongNumber (line 28) | func TestProtocol_errWrongNumber(t *testing.T) {
  function TestProtocol_GetPrefix (line 36) | func TestProtocol_GetPrefix(t *testing.T) {
  function TestProtocol_GetError (line 42) | func TestProtocol_GetError(t *testing.T) {
  function TestProtocol_ConvertError (line 48) | func TestProtocol_ConvertError(t *testing.T) {

FILE: internal/protocol/pubsub.go
  type Publish (line 25) | type Publish struct
    method Command (line 37) | func (p *Publish) Command(ctx context.Context) *redis.IntCmd {
  function NewPublish (line 30) | func NewPublish(channel, message string) *Publish {
  function ParsePublishCommand (line 45) | func ParsePublishCommand(cmd redcon.Command) (*Publish, error) {
  type PublishInternal (line 56) | type PublishInternal struct
    method Command (line 68) | func (p *PublishInternal) Command(ctx context.Context) *redis.IntCmd {
  function NewPublishInternal (line 61) | func NewPublishInternal(channel, message string) *PublishInternal {
  function ParsePublishInternalCommand (line 76) | func ParsePublishInternalCommand(cmd redcon.Command) (*PublishInternal, ...
  type Subscribe (line 87) | type Subscribe struct
    method Command (line 97) | func (s *Subscribe) Command(ctx context.Context) *redis.SliceCmd {
  function NewSubscribe (line 91) | func NewSubscribe(channels ...string) *Subscribe {
  function ParseSubscribeCommand (line 106) | func ParseSubscribeCommand(cmd redcon.Command) (*Subscribe, error) {
  type PSubscribe (line 121) | type PSubscribe struct
    method Command (line 131) | func (s *PSubscribe) Command(ctx context.Context) *redis.SliceCmd {
  function NewPSubscribe (line 125) | func NewPSubscribe(patterns ...string) *PSubscribe {
  function ParsePSubscribeCommand (line 140) | func ParsePSubscribeCommand(cmd redcon.Command) (*PSubscribe, error) {
  type PubSubChannels (line 155) | type PubSubChannels struct
    method SetPattern (line 163) | func (ps *PubSubChannels) SetPattern(pattern string) *PubSubChannels {
    method Command (line 168) | func (ps *PubSubChannels) Command(ctx context.Context) *redis.SliceCmd {
  function NewPubSubChannels (line 159) | func NewPubSubChannels() *PubSubChannels {
  function ParsePubSubChannelsCommand (line 177) | func ParsePubSubChannelsCommand(cmd redcon.Command) (*PubSubChannels, er...
  type PubSubNumpat (line 189) | type PubSubNumpat struct
    method Command (line 195) | func (ps *PubSubNumpat) Command(ctx context.Context) *redis.IntCmd {
  function NewPubSubNumpat (line 191) | func NewPubSubNumpat() *PubSubNumpat {
  function ParsePubSubNumpatCommand (line 201) | func ParsePubSubNumpatCommand(cmd redcon.Command) (*PubSubNumpat, error) {
  type PubSubNumsub (line 209) | type PubSubNumsub struct
    method Command (line 219) | func (ps *PubSubNumsub) Command(ctx context.Context) *redis.SliceCmd {
  function NewPubSubNumsub (line 213) | func NewPubSubNumsub(channels ...string) *PubSubNumsub {
  function ParsePubSubNumsubCommand (line 228) | func ParsePubSubNumsubCommand(cmd redcon.Command) (*PubSubNumsub, error) {

FILE: internal/protocol/pubsub_test.go
  function TestProtocol_ParsePublishCommand (line 10) | func TestProtocol_ParsePublishCommand(t *testing.T) {
  function TestProtocol_ParsePublishInternalCommand (line 21) | func TestProtocol_ParsePublishInternalCommand(t *testing.T) {
  function TestProtocol_ParseSubscribeCommand (line 32) | func TestProtocol_ParseSubscribeCommand(t *testing.T) {
  function TestProtocol_ParsePSubscribeCommand (line 43) | func TestProtocol_ParsePSubscribeCommand(t *testing.T) {
  function TestProtocol_PubSubChannels (line 54) | func TestProtocol_PubSubChannels(t *testing.T) {
  function TestProtocol_PubSubChannels_Patterns (line 63) | func TestProtocol_PubSubChannels_Patterns(t *testing.T) {
  function TestProtocol_PubSubNumpat (line 74) | func TestProtocol_PubSubNumpat(t *testing.T) {
  function TestProtocol_PubSubNumsub (line 82) | func TestProtocol_PubSubNumsub(t *testing.T) {

FILE: internal/protocol/system.go
  type Ping (line 27) | type Ping struct
    method SetMessage (line 35) | func (p *Ping) SetMessage(m string) *Ping {
    method Command (line 40) | func (p *Ping) Command(ctx context.Context) *redis.StringCmd {
  function NewPing (line 31) | func NewPing() *Ping {
  function ParsePingCommand (line 49) | func ParsePingCommand(cmd redcon.Command) (*Ping, error) {
  type MoveFragment (line 61) | type MoveFragment struct
    method Command (line 71) | func (m *MoveFragment) Command(ctx context.Context) *redis.StatusCmd {
  function NewMoveFragment (line 65) | func NewMoveFragment(payload []byte) *MoveFragment {
  function ParseMoveFragmentCommand (line 78) | func ParseMoveFragmentCommand(cmd redcon.Command) (*MoveFragment, error) {
  type UpdateRouting (line 86) | type UpdateRouting struct
    method Command (line 98) | func (u *UpdateRouting) Command(ctx context.Context) *redis.StringCmd {
  function NewUpdateRouting (line 91) | func NewUpdateRouting(payload []byte, coordinatorID uint64) *UpdateRouti...
  function ParseUpdateRoutingCommand (line 106) | func ParseUpdateRoutingCommand(cmd redcon.Command) (*UpdateRouting, erro...
  type LengthOfPart (line 118) | type LengthOfPart struct
    method SetReplica (line 129) | func (l *LengthOfPart) SetReplica() *LengthOfPart {
    method Command (line 134) | func (l *LengthOfPart) Command(ctx context.Context) *redis.IntCmd {
  function NewLengthOfPart (line 123) | func NewLengthOfPart(partID uint64) *LengthOfPart {
  function ParseLengthOfPartCommand (line 144) | func ParseLengthOfPartCommand(cmd redcon.Command) (*LengthOfPart, error) {
  type Stats (line 166) | type Stats struct
    method SetCollectRuntime (line 174) | func (s *Stats) SetCollectRuntime() *Stats {
    method Command (line 179) | func (s *Stats) Command(ctx context.Context) *redis.StringCmd {
  function NewStats (line 170) | func NewStats() *Stats {
  function ParseStatsCommand (line 188) | func ParseStatsCommand(cmd redcon.Command) (*Stats, error) {
  type Auth (line 207) | type Auth struct
    method Command (line 219) | func (a *Auth) Command(ctx context.Context) *redis.StatusCmd {
  function NewAuth (line 212) | func NewAuth(password string) *Auth {
  function ParseAuthCommand (line 229) | func ParseAuthCommand(cmd redcon.Command) (*Auth, error) {

FILE: internal/protocol/system_test.go
  function TestProtocol_Ping (line 24) | func TestProtocol_Ping(t *testing.T) {
  function TestProtocol_Ping_Message (line 34) | func TestProtocol_Ping_Message(t *testing.T) {
  function TestProtocol_MoveFragment (line 45) | func TestProtocol_MoveFragment(t *testing.T) {
  function TestProtocol_UpdateRoutingTable (line 55) | func TestProtocol_UpdateRoutingTable(t *testing.T) {
  function TestProtocol_LengthOfPart (line 66) | func TestProtocol_LengthOfPart(t *testing.T) {
  function TestProtocol_LengthOfPart_RC (line 77) | func TestProtocol_LengthOfPart_RC(t *testing.T) {
  function TestProtocol_Stats (line 89) | func TestProtocol_Stats(t *testing.T) {
  function TestProtocol_Stats_CR (line 99) | func TestProtocol_Stats_CR(t *testing.T) {
  function TestProtocol_Auth (line 110) | func TestProtocol_Auth(t *testing.T) {
  function TestProtocol_Auth_errWrongNumber (line 120) | func TestProtocol_Auth_errWrongNumber(t *testing.T) {

FILE: internal/pubsub/handlers.go
  method subscribeCommandHandler (line 22) | func (s *Service) subscribeCommandHandler(conn redcon.Conn, cmd redcon.C...
  method publishCommandHandler (line 36) | func (s *Service) publishCommandHandler(conn redcon.Conn, cmd redcon.Com...
  method publishInternalCommandHandler (line 72) | func (s *Service) publishInternalCommandHandler(conn redcon.Conn, cmd re...
  method psubscribeCommandHandler (line 82) | func (s *Service) psubscribeCommandHandler(conn redcon.Conn, cmd redcon....
  method pubsubChannelsCommandHandler (line 96) | func (s *Service) pubsubChannelsCommandHandler(conn redcon.Conn, cmd red...
  method pubsubNumpatCommandHandler (line 115) | func (s *Service) pubsubNumpatCommandHandler(conn redcon.Conn, cmd redco...
  method pubsubNumsubCommandHandler (line 125) | func (s *Service) pubsubNumsubCommandHandler(conn redcon.Conn, cmd redco...

FILE: internal/pubsub/handlers_test.go
  function TestPubSub_Handler_Subscribe (line 28) | func TestPubSub_Handler_Subscribe(t *testing.T) {
  function TestPubSub_Handler_Unsubscribe (line 77) | func TestPubSub_Handler_Unsubscribe(t *testing.T) {
  function TestPubSub_Handler_PSubscribe (line 114) | func TestPubSub_Handler_PSubscribe(t *testing.T) {
  function TestPubSub_Handler_PUnsubscribe (line 164) | func TestPubSub_Handler_PUnsubscribe(t *testing.T) {
  function TestPubSub_Handler_Ping (line 204) | func TestPubSub_Handler_Ping(t *testing.T) {
  function TestPubSub_Handler_Close (line 225) | func TestPubSub_Handler_Close(t *testing.T) {
  function TestPubSub_Handler_PubSubChannels_Without_Patterns (line 246) | func TestPubSub_Handler_PubSubChannels_Without_Patterns(t *testing.T) {
  function TestPubSub_Handler_PubSubChannels_With_Patterns (line 273) | func TestPubSub_Handler_PubSubChannels_With_Patterns(t *testing.T) {
  function TestPubSub_Handler_PubSubNumpat (line 301) | func TestPubSub_Handler_PubSubNumpat(t *testing.T) {
  function TestPubSub_Handler_PubSubNumsub (line 322) | func TestPubSub_Handler_PubSubNumsub(t *testing.T) {
  function TestPubSub_Cluster (line 345) | func TestPubSub_Cluster(t *testing.T) {

FILE: internal/pubsub/pubsub.go
  type PubSub (line 35) | type PubSub struct
    method Subscribe (line 48) | func (ps *PubSub) Subscribe(conn redcon.Conn, channel string) {
    method Psubscribe (line 53) | func (ps *PubSub) Psubscribe(conn redcon.Conn, channel string) {
    method Publish (line 58) | func (ps *PubSub) Publish(channel, message string) int {
    method subscribe (line 253) | func (ps *PubSub) subscribe(conn redcon.Conn, pattern bool, channel st...
    method unsubscribe (line 314) | func (ps *PubSub) unsubscribe(conn redcon.Conn, pattern, all bool, cha...
    method Channels (line 381) | func (ps *PubSub) Channels() []string {
    method ChannelsWithPatterns (line 403) | func (ps *PubSub) ChannelsWithPatterns(pattern string) []string {
    method Numpat (line 425) | func (ps *PubSub) Numpat() int {
    method Numsub (line 447) | func (ps *PubSub) Numsub(channel string) int {
  type pubSubConn (line 92) | type pubSubConn struct
    method writeMessage (line 106) | func (sconn *pubSubConn) writeMessage(pat bool, pchan, channel, msg st...
    method bgrunner (line 126) | func (sconn *pubSubConn) bgrunner(ps *PubSub) {
  type pubSubEntry (line 100) | type pubSubEntry struct
  function byEntry (line 227) | func byEntry(a, b interface{}) bool {

FILE: internal/pubsub/pubsub_test.go
  function testPubSubServer (line 38) | func testPubSubServer(addr string, done chan bool) {
  function TestPubSub (line 102) | func TestPubSub(t *testing.T) {

FILE: internal/pubsub/service.go
  type Service (line 44) | type Service struct
    method RegisterHandlers (line 57) | func (s *Service) RegisterHandlers() {
    method Start (line 91) | func (s *Service) Start() error {
    method Shutdown (line 96) | func (s *Service) Shutdown(ctx context.Context) error {
  function NewService (line 68) | func NewService(e *environment.Environment) (service.Service, error) {

FILE: internal/ramblock/compaction.go
  method evictTable (line 26) | func (rb *RamBlock) evictTable(t *table.Table) error {
  method isTableExpired (line 69) | func (rb *RamBlock) isTableExpired(recycledAt int64) bool {
  method isCompactionOK (line 79) | func (rb *RamBlock) isCompactionOK(t *table.Table) bool {
  method Compaction (line 84) | func (rb *RamBlock) Compaction() (bool, error) {

FILE: internal/ramblock/compaction_test.go
  function TestRamBlock_Compaction (line 28) | func TestRamBlock_Compaction(t *testing.T) {
  function TestRamBlock_Compaction_MaxIdleTableDuration (line 73) | func TestRamBlock_Compaction_MaxIdleTableDuration(t *testing.T) {

FILE: internal/ramblock/entry/entry.go
  type Entry (line 28) | type Entry struct
    method SetKey (line 42) | func (e *Entry) SetKey(key string) {
    method Key (line 46) | func (e *Entry) Key() string {
    method SetValue (line 50) | func (e *Entry) SetValue(value []byte) {
    method Value (line 54) | func (e *Entry) Value() []byte {
    method SetTTL (line 58) | func (e *Entry) SetTTL(ttl int64) {
    method TTL (line 62) | func (e *Entry) TTL() int64 {
    method SetTimestamp (line 66) | func (e *Entry) SetTimestamp(timestamp int64) {
    method Timestamp (line 70) | func (e *Entry) Timestamp() int64 {
    method SetLastAccess (line 74) | func (e *Entry) SetLastAccess(lastAccess int64) {
    method LastAccess (line 78) | func (e *Entry) LastAccess() int64 {
    method Encode (line 82) | func (e *Entry) Encode() []byte {
    method Decode (line 120) | func (e *Entry) Decode(buf []byte) {
  function New (line 38) | func New() *Entry {

FILE: internal/ramblock/entry/entry_test.go
  function TestEntryEncodeDecode (line 25) | func TestEntryEncodeDecode(t *testing.T) {
  function TestEntry_Encode_EmptyKey (line 45) | func TestEntry_Encode_EmptyKey(t *testing.T) {
  function TestEntry_Encode_EmptyValue (line 65) | func TestEntry_Encode_EmptyValue(t *testing.T) {
  function TestEntry_Encode_MaxLengthKey (line 85) | func TestEntry_Encode_MaxLengthKey(t *testing.T) {
  function TestEntry_Encode_KeyLengthOverflow (line 106) | func TestEntry_Encode_KeyLengthOverflow(t *testing.T) {

FILE: internal/ramblock/ramblock.go
  constant maxGarbageRatio (line 36) | maxGarbageRatio = 0.40
  constant defaultTableSize (line 38) | defaultTableSize = uint64(1 << 20)
  constant defaultMaxIdleTableTimeout (line 40) | defaultMaxIdleTableTimeout = 15 * time.Minute
  type RamBlock (line 44) | type RamBlock struct
    method SetConfig (line 81) | func (rb *RamBlock) SetConfig(c *storage.Config) {
    method makeTable (line 85) | func (rb *RamBlock) makeTable() error {
    method SetLogger (line 114) | func (rb *RamBlock) SetLogger(_ *log.Logger) {}
    method Start (line 116) | func (rb *RamBlock) Start() error {
    method Fork (line 182) | func (rb *RamBlock) Fork(c *storage.Config) (storage.Engine, error) {
    method Name (line 199) | func (rb *RamBlock) Name() string {
    method NewEntry (line 203) | func (rb *RamBlock) NewEntry() storage.Entry {
    method putWithRetry (line 209) | func (rb *RamBlock) putWithRetry(writeFn func(t *table.Table) error) e...
    method PutRaw (line 236) | func (rb *RamBlock) PutRaw(hkey uint64, value []byte) error {
    method Put (line 247) | func (rb *RamBlock) Put(hkey uint64, value storage.Entry) error {
    method GetRaw (line 258) | func (rb *RamBlock) GetRaw(hkey uint64) ([]byte, error) {
    method Get (line 281) | func (rb *RamBlock) Get(hkey uint64) (storage.Entry, error) {
    method GetTTL (line 302) | func (rb *RamBlock) GetTTL(hkey uint64) (int64, error) {
    method GetLastAccess (line 322) | func (rb *RamBlock) GetLastAccess(hkey uint64) (int64, error) {
    method GetKey (line 344) | func (rb *RamBlock) GetKey(hkey uint64) (string, error) {
    method Delete (line 365) | func (rb *RamBlock) Delete(hkey uint64) error {
    method UpdateTTL (line 384) | func (rb *RamBlock) UpdateTTL(hkey uint64, data storage.Entry) error {
    method Stats (line 404) | func (rb *RamBlock) Stats() storage.Stats {
    method Check (line 419) | func (rb *RamBlock) Check(hkey uint64) bool {
    method Range (line 437) | func (rb *RamBlock) Range(f func(hkey uint64, e storage.Entry) bool) {
    method RangeHKey (line 451) | func (rb *RamBlock) RangeHKey(f func(hkey uint64) bool) {
    method findCoefficient (line 461) | func (rb *RamBlock) findCoefficient(coefficient uint64) (uint64, error) {
    method scanCommon (line 475) | func (rb *RamBlock) scanCommon(cursor uint64, expr string, count int, ...
    method Scan (line 525) | func (rb *RamBlock) Scan(cursor uint64, count int, f func(e storage.En...
    method ScanRegexMatch (line 529) | func (rb *RamBlock) ScanRegexMatch(cursor uint64, expr string, count i...
    method Close (line 533) | func (rb *RamBlock) Close() error {
    method Destroy (line 537) | func (rb *RamBlock) Destroy() error {
  function DefaultConfig (line 52) | func DefaultConfig() *storage.Config {
  function New (line 59) | func New(c *storage.Config) (*RamBlock, error) {
  function requiredSizeForAnEntry (line 123) | func requiredSizeForAnEntry(e storage.Entry) uint64 {
  function prepareTableSize (line 127) | func prepareTableSize(raw interface{}) (size uint64, err error) {

FILE: internal/ramblock/ramblock_test.go
  function bkey (line 33) | func bkey(i int) string {
  function bval (line 37) | func bval(i int) []byte {
  function testRamBlock (line 41) | func testRamBlock(t *testing.T, c *storage.Config) storage.Engine {
  function TestRamBlock_Put (line 54) | func TestRamBlock_Put(t *testing.T) {
  function TestRamBlock_Get (line 69) | func TestRamBlock_Get(t *testing.T) {
  function TestRamBlock_Delete (line 96) | func TestRamBlock_Delete(t *testing.T) {
  function TestRamBlock_ExportImport (line 133) | func TestRamBlock_ExportImport(t *testing.T) {
  function TestRamBlock_Stats_Length (line 178) | func TestRamBlock_Stats_Length(t *testing.T) {
  function TestRamBlock_Range (line 194) | func TestRamBlock_Range(t *testing.T) {
  function TestRamBlock_Check (line 218) | func TestRamBlock_Check(t *testing.T) {
  function TestRamBlock_UpdateTTL (line 240) | func TestRamBlock_UpdateTTL(t *testing.T) {
  function TestRamBlock_GetKey (line 277) | func TestRamBlock_GetKey(t *testing.T) {
  function TestRamBlock_PutRawGetRaw (line 296) | func TestRamBlock_PutRawGetRaw(t *testing.T) {
  function TestRamBlock_GetTTL (line 312) | func TestRamBlock_GetTTL(t *testing.T) {
  function TestRamBlock_GetLastAccess (line 332) | func TestRamBlock_GetLastAccess(t *testing.T) {
  function TestRamBlock_Fork (line 349) | func TestRamBlock_Fork(t *testing.T) {
  function TestRamBlock_StateChange (line 397) | func TestRamBlock_StateChange(t *testing.T) {
  function TestRamBlock_NewEntry (line 422) | func TestRamBlock_NewEntry(t *testing.T) {
  function TestRamBlock_Name (line 430) | func TestRamBlock_Name(t *testing.T) {
  function TestRamBlock_CloseDestroy (line 435) | func TestRamBlock_CloseDestroy(t *testing.T) {
  function TestStorage_Scan (line 441) | func TestStorage_Scan(t *testing.T) {
  function TestStorage_ScanRegexMatch (line 475) | func TestStorage_ScanRegexMatch(t *testing.T) {
  function TestStorage_ScanRegexMatch_OnlyOneEntry (line 516) | func TestStorage_ScanRegexMatch_OnlyOneEntry(t *testing.T) {
  function TestStorage_Scan_NonContiguousCoefficients (line 563) | func TestStorage_Scan_NonContiguousCoefficients(t *testing.T) {
  function TestRamBlock_Put_ErrEntryTooLarge (line 638) | func TestRamBlock_Put_ErrEntryTooLarge(t *testing.T) {
  function TestPrepareTableSize_NegativeValues (line 654) | func TestPrepareTableSize_NegativeValues(t *testing.T) {
  function TestPrepareTableSize_ValidValues (line 675) | func TestPrepareTableSize_ValidValues(t *testing.T) {
  function TestPrepareTableSize_InvalidType (line 697) | func TestPrepareTableSize_InvalidType(t *testing.T) {
  function TestRamBlock_New_NegativeTableSize (line 703) | func TestRamBlock_New_NegativeTableSize(t *testing.T) {
  function TestRamBlock_Start_NilConfig (line 712) | func TestRamBlock_Start_NilConfig(t *testing.T) {
  function TestRamBlock_PutRaw_ErrEntryTooLarge (line 722) | func TestRamBlock_PutRaw_ErrEntryTooLarge(t *testing.T) {
  function TestRamBlock_GetRaw_KeyNotFound (line 733) | func TestRamBlock_GetRaw_KeyNotFound(t *testing.T) {
  function TestRamBlock_GetTTL_KeyNotFound (line 742) | func TestRamBlock_GetTTL_KeyNotFound(t *testing.T) {
  function TestRamBlock_GetLastAccess_KeyNotFound (line 751) | func TestRamBlock_GetLastAccess_KeyNotFound(t *testing.T) {
  function TestRamBlock_GetKey_KeyNotFound (line 760) | func TestRamBlock_GetKey_KeyNotFound(t *testing.T) {
  function TestRamBlock_Delete_NonExistentKey (line 769) | func TestRamBlock_Delete_NonExistentKey(t *testing.T) {
  function TestRamBlock_UpdateTTL_KeyNotFound (line 777) | func TestRamBlock_UpdateTTL_KeyNotFound(t *testing.T) {
  function TestRamBlock_Check_KeyNotFound (line 789) | func TestRamBlock_Check_KeyNotFound(t *testing.T) {
  function TestRamBlock_RangeHKey (line 796) | func TestRamBlock_RangeHKey(t *testing.T) {
  function TestRamBlock_SetConfig (line 819) | func TestRamBlock_SetConfig(t *testing.T) {
  function TestRamBlock_Fork_CustomConfig (line 834) | func TestRamBlock_Fork_CustomConfig(t *testing.T) {
  function TestRamBlock_MakeTable_RecycledTableReuse (line 849) | func TestRamBlock_MakeTable_RecycledTableReuse(t *testing.T) {
  function TestRamBlock_EvictTable_PutRawError (line 923) | func TestRamBlock_EvictTable_PutRawError(t *testing.T) {
  function TestRamBlock_Compaction_NoTables (line 957) | func TestRamBlock_Compaction_NoTables(t *testing.T) {
  function TestRamBlock_IsCompactionOK_ExactThreshold (line 970) | func TestRamBlock_IsCompactionOK_ExactThreshold(t *testing.T) {
  function TestTransferIterator_Drop_EmptyTables (line 1024) | func TestTransferIterator_Drop_EmptyTables(t *testing.T) {
  function TestTransferIterator_Export_SkipsRecycledState (line 1052) | func TestTransferIterator_Export_SkipsRecycledState(t *testing.T) {

FILE: internal/ramblock/table/pack.go
  type Pack (line 24) | type Pack struct
  function Encode (line 38) | func Encode(t *Table) ([]byte, error) {
  function Decode (line 61) | func Decode(data []byte) (*Table, error) {

FILE: internal/ramblock/table/pack_test.go
  function bkey (line 27) | func bkey(i int) string {
  function bval (line 31) | func bval(i int) []byte {
  function TestTable_Pack_Decode_CorruptData (line 35) | func TestTable_Pack_Decode_CorruptData(t *testing.T) {
  function TestTable_Pack_EncodeDecode_GarbageAndRecycledAt (line 40) | func TestTable_Pack_EncodeDecode_GarbageAndRecycledAt(t *testing.T) {
  function TestTable_Pack_EncodeDecode (line 102) | func TestTable_Pack_EncodeDecode(t *testing.T) {

FILE: internal/ramblock/table/table.go
  constant MaxKeyLength (line 32) | MaxKeyLength = 256
  constant MetadataLength (line 36) | MetadataLength = 29
  type State (line 40) | type State
  constant ReadWriteState (line 44) | ReadWriteState = State(iota + 1)
  constant ReadOnlyState (line 47) | ReadOnlyState
  constant RecycledState (line 50) | RecycledState
  type Stats (line 63) | type Stats struct
  type Table (line 87) | type Table struct
    method SetCoefficient (line 122) | func (t *Table) SetCoefficient(cf uint64) {
    method Coefficient (line 127) | func (t *Table) Coefficient() uint64 {
    method SetState (line 132) | func (t *Table) SetState(s State) {
    method State (line 137) | func (t *Table) State() State {
    method PutRaw (line 144) | func (t *Table) PutRaw(hkey uint64, value []byte) error {
    method Put (line 167) | func (t *Table) Put(hkey uint64, value storage.Entry) error {
    method GetRaw (line 227) | func (t *Table) GetRaw(hkey uint64) ([]byte, error) {
    method getRawKey (line 255) | func (t *Table) getRawKey(offset uint64) ([]byte, error) {
    method GetRawKey (line 263) | func (t *Table) GetRawKey(hkey uint64) ([]byte, error) {
    method GetKey (line 274) | func (t *Table) GetKey(hkey uint64) (string, error) {
    method GetTTL (line 284) | func (t *Table) GetTTL(hkey uint64) (int64, error) {
    method GetLastAccess (line 299) | func (t *Table) GetLastAccess(hkey uint64) (int64, error) {
    method get (line 316) | func (t *Table) get(offset uint64) storage.Entry {
    method Get (line 353) | func (t *Table) Get(hkey uint64) (storage.Entry, error) {
    method Delete (line 365) | func (t *Table) Delete(hkey uint64) error {
    method UpdateTTL (line 409) | func (t *Table) UpdateTTL(hkey uint64, value storage.Entry) error {
    method Check (line 435) | func (t *Table) Check(hkey uint64) bool {
    method Stats (line 441) | func (t *Table) Stats() Stats {
    method Range (line 453) | func (t *Table) Range(f func(hkey uint64, e storage.Entry) bool) {
    method RangeHKey (line 468) | func (t *Table) RangeHKey(f func(hkey uint64) bool) {
    method Reset (line 479) | func (t *Table) Reset() {
    method Scan (line 496) | func (t *Table) Scan(cursor uint64, count int, f func(e storage.Entry)...
    method ScanRegexMatch (line 524) | func (t *Table) ScanRegexMatch(cursor uint64, expr string, count int, ...
  function New (line 102) | func New(size uint64) *Table {

FILE: internal/ramblock/table/table_test.go
  constant hkey (line 32) | hkey uint64 = 18071988
  function setupTable (line 34) | func setupTable() (*Table, storage.Entry) {
  function TestTable_Put (line 42) | func TestTable_Put(t *testing.T) {
  function TestTable_Get (line 48) | func TestTable_Get(t *testing.T) {
  function TestTable_Delete (line 63) | func TestTable_Delete(t *testing.T) {
  function TestTable_Check (line 76) | func TestTable_Check(t *testing.T) {
  function TestTable_PutRaw (line 89) | func TestTable_PutRaw(t *testing.T) {
  function TestTable_GetRaw (line 100) | func TestTable_GetRaw(t *testing.T) {
  function TestTable_GetRawKey (line 118) | func TestTable_GetRawKey(t *testing.T) {
  function TestTable_GetKey (line 129) | func TestTable_GetKey(t *testing.T) {
  function TestTable_SetState (line 140) | func TestTable_SetState(t *testing.T) {
  function TestTable_GetTTL (line 146) | func TestTable_GetTTL(t *testing.T) {
  function TestTable_GetLastAccess (line 159) | func TestTable_GetLastAccess(t *testing.T) {
  function TestTable_UpdateTTL (line 170) | func TestTable_UpdateTTL(t *testing.T) {
  function TestTable_UpdateTTL_Update_LastAccess (line 187) | func TestTable_UpdateTTL_Update_LastAccess(t *testing.T) {
  function TestTable_State (line 210) | func TestTable_State(t *testing.T) {
  function TestTable_Range (line 215) | func TestTable_Range(t *testing.T) {
  function TestTable_Stats (line 246) | func TestTable_Stats(t *testing.T) {
  function TestTable_Reset (line 279) | func TestTable_Reset(t *testing.T) {
  function TestTable_Reset_RecycleScenario (line 312) | func TestTable_Reset_RecycleScenario(t *testing.T) {
  function TestTable_Scan (line 368) | func TestTable_Scan(t *testing.T) {
  function TestTable_ScanRegexMatch (line 396) | func TestTable_ScanRegexMatch(t *testing.T) {
  function TestTable_Put_ErrKeyTooLarge (line 434) | func TestTable_Put_ErrKeyTooLarge(t *testing.T) {
  function TestTable_Put_OverwriteExistingKey (line 445) | func TestTable_Put_OverwriteExistingKey(t *testing.T) {
  function TestTable_PutRaw_ErrNotEnoughSpace (line 473) | func TestTable_PutRaw_ErrNotEnoughSpace(t *testing.T) {
  function TestTable_Put_ErrNotEnoughSpace (line 480) | func TestTable_Put_ErrNotEnoughSpace(t *testing.T) {
  function TestTable_GetRaw_ErrHKeyNotFound (line 490) | func TestTable_GetRaw_ErrHKeyNotFound(t *testing.T) {
  function TestTable_Delete_ErrHKeyNotFound (line 496) | func TestTable_Delete_ErrHKeyNotFound(t *testing.T) {
  function TestTable_UpdateTTL_ErrHKeyNotFound (line 502) | func TestTable_UpdateTTL_ErrHKeyNotFound(t *testing.T) {
  function TestTable_RangeHKey (line 510) | func TestTable_RangeHKey(t *testing.T) {
  function TestTable_ScanRegexMatch_InvalidRegex (line 540) | func TestTable_ScanRegexMatch_InvalidRegex(t *testing.T) {
  function TestTable_Coefficient (line 554) | func TestTable_Coefficient(t *testing.T) {
  function TestTable_ScanRegexMatch_SingleMatch (line 565) | func TestTable_ScanRegexMatch_SingleMatch(t *testing.T) {

FILE: internal/ramblock/transport.go
  type transferIterator (line 25) | type transferIterator struct
    method Next (line 29) | func (t *transferIterator) Next() bool {
    method Drop (line 33) | func (t *transferIterator) Drop(index int) error {
    method Export (line 45) | func (t *transferIterator) Export() ([]byte, int, error) {
  method Import (line 60) | func (rb *RamBlock) Import(data []byte, f func(uint64, storage.Entry) er...
  method TransferIterator (line 72) | func (rb *RamBlock) TransferIterator() storage.TransferIterator {

FILE: internal/resp/encoder.go
  type encoder (line 39) | type encoder interface
  type Encoder (line 45) | type Encoder struct
    method Encode (line 61) | func (e *Encoder) Encode(v interface{}) error {
    method bytes (line 115) | func (e *Encoder) bytes(b []byte) error {
    method string (line 122) | func (e *Encoder) string(s string) error {
    method uint (line 126) | func (e *Encoder) uint(n uint64) error {
    method int (line 131) | func (e *Encoder) int(n int64) error {
    method float (line 136) | func (e *Encoder) float(f float64) error {
  function New (line 52) | func New(e encoder) *Encoder {

FILE: internal/resp/encoder_test.go
  type MyType (line 13) | type MyType struct
    method MarshalBinary (line 17) | func (t *MyType) MarshalBinary() ([]byte, error) {
    method UnmarshalBinary (line 21) | func (t *MyType) UnmarshalBinary(data []byte) error {
  function TestWriter_WriteArg (line 28) | func TestWriter_WriteArg(t *testing.T) {

FILE: internal/resp/scan.go
  function Scan (line 40) | func Scan(b []byte, v interface{}) error {

FILE: internal/roundrobin/round_robin.go
  type RoundRobin (line 27) | type RoundRobin struct
    method Get (line 45) | func (r *RoundRobin) Get() (string, error) {
    method Add (line 68) | func (r *RoundRobin) Add(item string) {
    method Delete (line 76) | func (r *RoundRobin) Delete(item string) {
    method Length (line 89) | func (r *RoundRobin) Length() int {
  function New (line 37) | func New(items []string) *RoundRobin {

FILE: internal/roundrobin/round_robin_test.go
  function TestRoundRobin (line 23) | func TestRoundRobin(t *testing.T) {
  function TestRoundRobin_Delete_NonExistent (line 75) | func TestRoundRobin_Delete_NonExistent(t *testing.T) {

FILE: internal/server/client.go
  type Client (line 28) | type Client struct
    method Addresses (line 51) | func (c *Client) Addresses() map[string]struct{} {
    method Get (line 62) | func (c *Client) Get(addr string) *redis.Client {
    method pickNodeRoundRobin (line 91) | func (c *Client) pickNodeRoundRobin() (string, error) {
    method Pick (line 105) | func (c *Client) Pick() (*redis.Client, error) {
    method Close (line 113) | func (c *Client) Close(addr string) error {
    method Shutdown (line 130) | func (c *Client) Shutdown(ctx context.Context) error {
  function NewClient (line 36) | func NewClient(c *config.Client) *Client {

FILE: internal/server/client_test.go
  function TestServer_Client_Get (line 29) | func TestServer_Client_Get(t *testing.T) {
  function TestServer_Client_Pick (line 59) | func TestServer_Client_Pick(t *testing.T) {
  function TestServer_Client_Close (line 99) | func TestServer_Client_Close(t *testing.T) {
  function TestServer_Client_Shutdown (line 118) | func TestServer_Client_Shutdown(t *testing.T) {

FILE: internal/server/handler.go
  type ServeMuxWrapper (line 25) | type ServeMuxWrapper struct
    method HandleFunc (line 75) | func (m *ServeMuxWrapper) HandleFunc(command string, handler func(conn...
  type HandlerFunc (line 34) | type HandlerFunc
  type Handler (line 36) | type Handler struct
    method ServeRESP (line 42) | func (h Handler) ServeRESP(conn redcon.Conn, cmd redcon.Command) {

FILE: internal/server/handler_test.go
  function respEcho (line 29) | func respEcho(t *testing.T, s *Server) {
  function TestHandler_ServeRESP_PreCondition (line 52) | func TestHandler_ServeRESP_PreCondition(t *testing.T) {
  function TestHandler_ServeRESP_PreCondition_DontCheck (line 67) | func TestHandler_ServeRESP_PreCondition_DontCheck(t *testing.T) {

FILE: internal/server/mux.go
  type ServeMux (line 38) | type ServeMux struct
    method HandleFunc (line 53) | func (m *ServeMux) HandleFunc(command string, handler redcon.Handler) {
    method Handle (line 62) | func (m *ServeMux) Handle(command string, handler redcon.Handler) {
    method ServeRESP (line 77) | func (m *ServeMux) ServeRESP(conn redcon.Conn, cmd redcon.Command) {
  function NewServeMux (line 44) | func NewServeMux(c *Config) *ServeMux {

FILE: internal/server/mux_test.go
  function TestMux_PubSub_Command (line 28) | func TestMux_PubSub_Command(t *testing.T) {

FILE: internal/server/server.go
  type Config (line 48) | type Config struct
  type ConnContext (line 57) | type ConnContext struct
    method SetAuthenticated (line 70) | func (c *ConnContext) SetAuthenticated(authenticated bool) {
    method IsAuthenticated (line 78) | func (c *ConnContext) IsAuthenticated() bool {
  function NewConnContext (line 65) | func NewConnContext() *ConnContext {
  type ConnWrapper (line 86) | type ConnWrapper struct
    method Write (line 92) | func (cw *ConnWrapper) Write(b []byte) (n int, err error) {
    method Read (line 103) | func (cw *ConnWrapper) Read(b []byte) (n int, err error) {
  type ListenerWrapper (line 114) | type ListenerWrapper struct
    method Accept (line 120) | func (lw *ListenerWrapper) Accept() (net.Conn, error) {
  type Server (line 139) | type Server struct
    method SetPreConditionFunc (line 177) | func (s *Server) SetPreConditionFunc(f func(conn redcon.Conn, cmd redc...
    method ServeMux (line 187) | func (s *Server) ServeMux() *ServeMuxWrapper {
    method ListenAndServe (line 192) | func (s *Server) ListenAndServe() error {
    method Shutdown (line 237) | func (s *Server) Shutdown(ctx context.Context) error {
  function New (line 156) | func New(c *Config, l *flog.Logger) *Server {

FILE: internal/server/server_test.go
  function getFreePort (line 33) | func getFreePort() (int, error) {
  function newServerWithPreConditionFunc (line 50) | func newServerWithPreConditionFunc(t *testing.T, precond func(conn redco...
  function newServer (line 85) | func newServer(t *testing.T) *Server {
  function defaultRedisOptions (line 93) | func defaultRedisOptions(c *Config) *redis.Options {
  function TestServer_RESP (line 99) | func TestServer_RESP(t *testing.T) {
  function TestServer_RESP_Stats (line 105) | func TestServer_RESP_Stats(t *testing.T) {
  function TestConnContext_Authentication (line 117) | func TestConnContext_Authentication(t *testing.T) {

FILE: internal/service/service.go
  type Service (line 21) | type Service interface

FILE: internal/stats/stats.go
  type Int64Counter (line 21) | type Int64Counter struct
    method Increase (line 31) | func (c *Int64Counter) Increase(delta int64) {
    method Read (line 36) | func (c *Int64Counter) Read() int64 {
    method Reset (line 41) | func (c *Int64Counter) Reset() {
  function NewInt64Counter (line 26) | func NewInt64Counter() *Int64Counter {
  type Int64Gauge (line 47) | type Int64Gauge struct
    method Increase (line 57) | func (c *Int64Gauge) Increase(delta int64) {
    method Decrease (line 62) | func (c *Int64Gauge) Decrease(delta int64) {
    method Read (line 67) | func (c *Int64Gauge) Read() int64 {
    method Reset (line 72) | func (c *Int64Gauge) Reset() {
  function NewInt64Gauge (line 52) | func NewInt64Gauge() *Int64Gauge {

FILE: internal/stats/stats_test.go
  function TestUint64Counter (line 24) | func TestUint64Counter(t *testing.T) {
  function TestUint64Gauge (line 41) | func TestUint64Gauge(t *testing.T) {

FILE: internal/testcluster/testcluster.go
  type TestCluster (line 36) | type TestCluster struct
    method newService (line 63) | func (t *TestCluster) newService(e *environment.Environment) service.S...
    method syncCluster (line 103) | func (t *TestCluster) syncCluster() {
    method AddMember (line 121) | func (t *TestCluster) AddMember(e *environment.Environment) service.Se...
    method Shutdown (line 174) | func (t *TestCluster) Shutdown() {
  function NewEnvironment (line 47) | func NewEnvironment(c *config.Config) *environment.Environment {
  function New (line 94) | func New(constructor func(e *environment.Environment) (service.Service, ...

FILE: internal/testutil/mockfragment/mockfragment.go
  type Result (line 28) | type Result struct
  type MockFragment (line 33) | type MockFragment struct
    method Stats (line 46) | func (f *MockFragment) Stats() storage.Stats {
    method Name (line 54) | func (f *MockFragment) Name() string {
    method Put (line 58) | func (f *MockFragment) Put(key string, value interface{}) {
    method Get (line 64) | func (f *MockFragment) Get(key string) interface{} {
    method Delete (line 70) | func (f *MockFragment) Delete(key string) {
    method Fill (line 76) | func (f *MockFragment) Fill() {
    method Result (line 91) | func (f *MockFragment) Result() map[partitions.Kind]map[uint64]Result {
    method Move (line 95) | func (f *MockFragment) Move(part *partitions.Partition, name string, o...
    method Compaction (line 113) | func (f *MockFragment) Compaction() (bool, error) {
    method Destroy (line 117) | func (f *MockFragment) Destroy() error {
    method Close (line 124) | func (f *MockFragment) Close() error {
  function New (line 39) | func New() *MockFragment {

FILE: internal/testutil/testutil.go
  function GetFreePort (line 31) | func GetFreePort() (int, error) {
  function NewFlogger (line 48) | func NewFlogger(c *config.Config) *flog.Logger {
  function NewEngineConfig (line 57) | func NewEngineConfig(t *testing.T) *config.Engine {
  function NewConfig (line 64) | func NewConfig() *config.Config {
  function NewServer (line 86) | func NewServer(c *config.Config) *server.Server {
  function TryWithInterval (line 96) | func TryWithInterval(max int, interval time.Duration, f func() error) er...
  function ToKey (line 119) | func ToKey(i int) string {
  function ToVal (line 123) | func ToVal(i int) []byte {

FILE: internal/util/safe.go
  function BytesToString (line 32) | func BytesToString(b []byte) string {
  function StringToBytes (line 36) | func StringToBytes(s string) []byte {

FILE: internal/util/strconv.go
  function Atoi (line 31) | func Atoi(b []byte) (int, error) {
  function ParseInt (line 35) | func ParseInt(b []byte, base int, bitSize int) (int64, error) {
  function ParseUint (line 39) | func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
  function ParseFloat (line 43) | func ParseFloat(b []byte, bitSize int) (float64, error) {

FILE: internal/util/unsafe.go
  function BytesToString (line 37) | func BytesToString(b []byte) string {
  function StringToBytes (line 42) | func StringToBytes(s string) []byte {

FILE: olric.go
  constant ReleaseVersion (line 60) | ReleaseVersion string = "0.7.3"
  type Olric (line 108) | type Olric struct
    method preconditionFunc (line 262) | func (db *Olric) preconditionFunc(conn redcon.Conn, _ redcon.Command) ...
    method registerCommandHandlers (line 271) | func (db *Olric) registerCommandHandlers() {
    method callStartedCallback (line 281) | func (db *Olric) callStartedCallback() {
    method isOperable (line 317) | func (db *Olric) isOperable() error {
    method Start (line 327) | func (db *Olric) Start() error {
    method Shutdown (line 411) | func (db *Olric) Shutdown(ctx context.Context) error {
  function prepareConfig (line 140) | func prepareConfig(c *config.Config) (*config.Config, error) {
  function initializeServices (line 172) | func initializeServices(db *Olric) error {
  function New (line 195) | func New(c *config.Config) (*Olric, error) {
  function convertClusterError (line 303) | func convertClusterError(err error) error {
  function convertDMapError (line 468) | func convertDMapError(err error) error {
  function registerErrors (line 496) | func registerErrors() {

FILE: olric_test.go
  function newTestOlricWithConfig (line 34) | func newTestOlricWithConfig(t *testing.T, c *config.Config) *Olric {
  type testOlricCluster (line 76) | type testOlricCluster struct
    method addMemberWithConfig (line 96) | func (cl *testOlricCluster) addMemberWithConfig(t *testing.T, c *confi...
    method addMember (line 114) | func (cl *testOlricCluster) addMember(t *testing.T) *Olric {
  function newTestOlricCluster (line 81) | func newTestOlricCluster(t *testing.T) *testOlricCluster {
  function TestOlric_StartAndShutdown (line 118) | func TestOlric_StartAndShutdown(t *testing.T) {
  function TestOlricCluster_StartAndShutdown (line 126) | func TestOlricCluster_StartAndShutdown(t *testing.T) {

FILE: ping.go
  constant DefaultPingResponse (line 25) | DefaultPingResponse = "PONG"
  method ping (line 27) | func (db *Olric) ping(ctx context.Context, addr, message string) ([]byte...
  method pingCommandHandler (line 44) | func (db *Olric) pingCommandHandler(conn redcon.Conn, cmd redcon.Command) {

FILE: ping_test.go
  function TestOlric_Ping (line 24) | func TestOlric_Ping(t *testing.T) {
  function TestOlric_PingWithMessage (line 33) | func TestOlric_PingWithMessage(t *testing.T) {

FILE: pipeline.go
  type DMapPipeline (line 57) | type DMapPipeline struct
    method addCommand (line 70) | func (dp *DMapPipeline) addCommand(key string, cmd redis.Cmder) (uint6...
    method Put (line 117) | func (dp *DMapPipeline) Put(ctx context.Context, key string, value int...
    method Get (line 177) | func (dp *DMapPipeline) Get(ctx context.Context, key string) *FutureGet {
    method Delete (line 222) | func (dp *DMapPipeline) Delete(ctx context.Context, key string) *Futur...
    method Expire (line 264) | func (dp *DMapPipeline) Expire(ctx context.Context, key string, timeou...
    method Incr (line 309) | func (dp *DMapPipeline) Incr(ctx context.Context, key string, delta in...
    method Decr (line 354) | func (dp *DMapPipeline) Decr(ctx context.Context, key string, delta in...
    method GetPut (line 405) | func (dp *DMapPipeline) GetPut(ctx context.Context, key string, value ...
    method IncrByFloat (line 459) | func (dp *DMapPipeline) IncrByFloat(ctx context.Context, key string, d...
    method execOnPartition (line 471) | func (dp *DMapPipeline) execOnPartition(ctx context.Context, partID ui...
    method Exec (line 499) | func (dp *DMapPipeline) Exec(ctx context.Context) error {
    method Discard (line 541) | func (dp *DMapPipeline) Discard() error {
    method Close (line 581) | func (dp *DMapPipeline) Close() {
    method initContexts (line 617) | func (dp *DMapPipeline) initContexts() {
  type FuturePut (line 88) | type FuturePut struct
    method Result (line 97) | func (f *FuturePut) Result() error {
  type FutureGet (line 143) | type FutureGet struct
    method Result (line 152) | func (f *FutureGet) Result() (*GetResponse, error) {
  type FutureDelete (line 190) | type FutureDelete struct
    method Result (line 199) | func (f *FutureDelete) Result() (int, error) {
  type FutureExpire (line 235) | type FutureExpire struct
    method Result (line 244) | func (f *FutureExpire) Result() error {
  type FutureIncr (line 277) | type FutureIncr struct
    method Result (line 286) | func (f *FutureIncr) Result() (int, error) {
  type FutureDecr (line 322) | type FutureDecr struct
    method Result (line 331) | func (f *FutureDecr) Result() (int, error) {
  type FutureGetPut (line 367) | type FutureGetPut struct
    method Result (line 376) | func (f *FutureGetPut) Result() (*GetResponse, error) {
  type FutureIncrByFloat (line 426) | type FutureIncrByFloat struct
    method Result (line 435) | func (f *FutureIncrByFloat) Result() (float64, error) {
  method Pipeline (line 596) | func (dm *ClusterDMap) Pipeline(opts ...PipelineOption) (*DMapPipeline, ...
  function getPipelineCmdsFromPool (line 636) | func getPipelineCmdsFromPool() []redis.Cmder {
  function putPipelineCmdsIntoPool (line 640) | func putPipelineCmdsIntoPool(cmds []redis.Cmder) {

FILE: pipeline_test.go
  function TestDMapPipeline_Put (line 27) | func TestDMapPipeline_Put(t *testing.T) {
  function TestDMapPipeline_Get (line 69) | func TestDMapPipeline_Get(t *testing.T) {
  function TestDMapPipeline_Delete (line 110) | func TestDMapPipeline_Delete(t *testing.T) {
  function TestDMapPipeline_Expire (line 148) | func TestDMapPipeline_Expire(t *testing.T) {
  function TestDMapPipeline_Incr (line 192) | func TestDMapPipeline_Incr(t *testing.T) {
  function TestDMapPipeline_Decr (line 226) | func TestDMapPipeline_Decr(t *testing.T) {
  function TestDMapPipeline_GetPut (line 260) | func TestDMapPipeline_GetPut(t *testing.T) {
  function TestDMapPipeline_IncrByFloat (line 296) | func TestDMapPipeline_IncrByFloat(t *testing.T) {
  function TestDMapPipeline_Discard (line 329) | func TestDMapPipeline_Discard(t *testing.T) {
  function TestDMapPipeline_Close (line 367) | func TestDMapPipeline_Close(t *testing.T) {
  function TestDMapPipeline_ErrNotReady (line 397) | func TestDMapPipeline_ErrNotReady(t *testing.T) {
  function TestDMapPipeline_EmbeddedClient (line 469) | func TestDMapPipeline_EmbeddedClient(t *testing.T) {
  function TestDMapPipeline_setOrGetClusterClient (line 510) | func TestDMapPipeline_setOrGetClusterClient(t *testing.T) {

FILE: pkg/flog/flog.go
  type Logger (line 56) | type Logger struct
    method SetLevel (line 70) | func (f *Logger) SetLevel(level int32) {
    method ShowLineNumber (line 78) | func (f *Logger) ShowLineNumber(show int32) {
    method V (line 93) | func (f *Logger) V(level int32) Verbose {
  function New (line 63) | func New(logger *log.Logger) *Logger {
  type Verbose (line 86) | type Verbose struct
    method Ok (line 101) | func (v Verbose) Ok() bool {
    method Printf (line 107) | func (v Verbose) Printf(format string, i ...interface{}) {
    method Println (line 121) | func (v Verbose) Println(i ...interface{}) {

FILE: pkg/service_discovery/service_discovery.go
  type ServiceDiscovery (line 21) | type ServiceDiscovery interface

FILE: pkg/storage/config.go
  type Config (line 23) | type Config struct
    method Add (line 39) | func (c *Config) Add(key string, value interface{}) {
    method Get (line 46) | func (c *Config) Get(key string) (interface{}, error) {
    method Delete (line 57) | func (c *Config) Delete(key string) {
    method Copy (line 64) | func (c *Config) Copy() *Config {
    method ToMap (line 77) | func (c *Config) ToMap() map[string]interface{} {
  function NewConfig (line 29) | func NewConfig(cfg map[string]interface{}) *Config {

FILE: pkg/storage/config_test.go
  function Test_Config (line 22) | func Test_Config(t *testing.T) {

FILE: pkg/storage/engine.go
  type TransferIterator (line 38) | type TransferIterator interface
  type Engine (line 51) | type Engine interface

FILE: pkg/storage/entry.go
  type Entry (line 18) | type Entry interface

FILE: pkg/storage/stats.go
  type Stats (line 18) | type Stats struct

FILE: pubsub.go
  type PubSub (line 25) | type PubSub struct
    method Subscribe (line 58) | func (ps *PubSub) Subscribe(ctx context.Context, channels ...string) *...
    method PSubscribe (line 62) | func (ps *PubSub) PSubscribe(ctx context.Context, channels ...string) ...
    method Publish (line 66) | func (ps *PubSub) Publish(ctx context.Context, channel string, message...
    method PubSubChannels (line 70) | func (ps *PubSub) PubSubChannels(ctx context.Context, pattern string) ...
    method PubSubNumSub (line 74) | func (ps *PubSub) PubSubNumSub(ctx context.Context, channels ...string...
    method PubSubNumPat (line 78) | func (ps *PubSub) PubSubNumPat(ctx context.Context) (int64, error) {
  function newPubSub (line 31) | func newPubSub(client *server.Client, options ...PubSubOption) (*PubSub,...

FILE: pubsub_test.go
  function pubsubTestRunner (line 27) | func pubsubTestRunner(t *testing.T, ps *PubSub, kind, channel string) {
  function TestPubSub_Publish_Subscribe (line 82) | func TestPubSub_Publish_Subscribe(t *testing.T) {
  function TestPubSub_Publish_PSubscribe (line 99) | func TestPubSub_Publish_PSubscribe(t *testing.T) {
  function TestPubSub_PubSubChannels (line 115) | func TestPubSub_PubSubChannels(t *testing.T) {
  function TestPubSub_PubSubNumSub (line 145) | func TestPubSub_PubSubNumSub(t *testing.T) {
  function TestPubSub_PubSubNumPat (line 179) | func TestPubSub_PubSubNumPat(t *testing.T) {
  function TestPubSub_Cluster (line 208) | func TestPubSub_Cluster(t *testing.T) {

FILE: stats.go
  function toMember (line 33) | func toMember(member discovery.Member) stats.Member {
  function toMembers (line 41) | func toMembers(members []discovery.Member) []stats.Member {
  method collectPartitionMetrics (line 49) | func (db *Olric) collectPartitionMetrics(partID uint64, part *partitions...
  method checkPartitionOwnership (line 76) | func (db *Olric) checkPartitionOwnership(part *partitions.Partition) bool {
  method stats (line 86) | func (db *Olric) stats(cfg statsConfig) stats.Stats {
  method statsCommandHandler (line 153) | func (db *Olric) statsCommandHandler(conn redcon.Conn, cmd redcon.Comman...

FILE: stats/stats.go
  type PartitionID (line 22) | type PartitionID
  type MemberID (line 25) | type MemberID
  type SlabInfo (line 29) | type SlabInfo struct
  type DMap (line 41) | type DMap struct
  type Partition (line 53) | type Partition struct
  type Runtime (line 68) | type Runtime struct
  type Member (line 89) | type Member struct
    method String (line 102) | func (m Member) String() string {
  type Network (line 107) | type Network struct
  type DMaps (line 125) | type DMaps struct
  type PubSub (line 146) | type PubSub struct
  type Stats (line 164) | type Stats struct

FILE: stats/stats_test.go
  function TestMember_String (line 24) | func TestMember_String(t *testing.T) {

FILE: stats_test.go
  function resetPubSubStats (line 32) | func resetPubSubStats() {
  function TestOlric_Stats (line 40) | func TestOlric_Stats(t *testing.T) {
  function TestOlric_Stats_CollectRuntime (line 93) | func TestOlric_Stats_CollectRuntime(t *testing.T) {
  function TestOlric_Stats_Cluster (line 106) | func TestOlric_Stats_Cluster(t *testing.T) {
  function TestStats_PubSub (line 118) | func TestStats_PubSub(t *testing.T) {
  function TestStats_DMap (line 199) | func TestStats_DMap(t *testing.T) {
Condensed preview — 202 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,093K chars).
[
  {
    "path": ".github/FUNDING.yml",
    "chars": 622,
    "preview": "# These are supported funding model platforms\n\ngithub: buraksezer\npatreon: # not used anymore\nopen_collective: # Replace"
  },
  {
    "path": ".github/workflows/ci.yml",
    "chars": 1398,
    "preview": "name: Unit & Integration tests\n\non:\n  push:\n    branches: [ \"master\" ]\n  pull_request:\n    branches: [ \"master\" ]\n\njobs:"
  },
  {
    "path": ".github/workflows/codeql-analysis.yml",
    "chars": 2323,
    "preview": "# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# Y"
  },
  {
    "path": ".github/workflows/golangci-lint.yml",
    "chars": 511,
    "preview": "name: golangci-lint\non:\n  push:\n    branches:\n      - master\n  pull_request:\n\npermissions:\n  contents: read\n  # Optional"
  },
  {
    "path": ".gitignore",
    "chars": 381,
    "preview": "# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Vim creates this\n*.swp\n\n# Test binary, build with `go te"
  },
  {
    "path": "Dockerfile",
    "chars": 424,
    "preview": "FROM golang:latest as build\nWORKDIR /src/\nCOPY . /src/\nRUN go mod download\nRUN CGO_ENABLED=1 go build -ldflags=\"-s -w\" -"
  },
  {
    "path": "LICENSE",
    "chars": 10770,
    "preview": "\n                                 Apache License\n                           Version 2.0, January 2004\n                  "
  },
  {
    "path": "Makefile",
    "chars": 368,
    "preview": ".PHONY: test\ntest:\n\tgo test -p 1 ./...\n\n.PHONY: test-quick\ntest-quick:\n\tgo test -p 1 -count=1 ./...\n\n.PHONY: test-race\nt"
  },
  {
    "path": "README.md",
    "chars": 65060,
    "preview": "# Olric [![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet"
  },
  {
    "path": "auth.go",
    "chars": 1514,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "auth_test.go",
    "chars": 2270,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "client.go",
    "chars": 11007,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cluster.go",
    "chars": 5169,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cluster_client.go",
    "chars": 24021,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cluster_client_test.go",
    "chars": 18653,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cluster_iterator.go",
    "chars": 6874,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cluster_iterator_test.go",
    "chars": 2255,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cluster_test.go",
    "chars": 1826,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cmd/olric-server/main.go",
    "chars": 3710,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "cmd/olric-server/olric-server-local.yaml",
    "chars": 9289,
    "preview": "#\n# IMPORTANT NOTE: This configuration file is intended for testing and local development.\n#\nserver:\n  # BindAddr denote"
  },
  {
    "path": "cmd/olric-server/server/server.go",
    "chars": 3103,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/authentication.go",
    "chars": 1357,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/client.go",
    "chars": 6302,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/config.go",
    "chars": 18508,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/config_test.go",
    "chars": 6118,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/dmap.go",
    "chars": 3430,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/dmap_test.go",
    "chars": 1068,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/dmaps.go",
    "chars": 4423,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/engine.go",
    "chars": 2551,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/engine_test.go",
    "chars": 1214,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/internal/loader/loader.go",
    "chars": 6285,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/load.go",
    "chars": 12254,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/memberlist.go",
    "chars": 2599,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/network.go",
    "chars": 4489,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "config/network_test.go",
    "chars": 1184,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "docker/README.md",
    "chars": 3815,
    "preview": "# Multi-container environment with Docker Compose\n\nWe provide a multi-container environment to test, develop and deploy "
  },
  {
    "path": "docker/docker-compose.yml",
    "chars": 718,
    "preview": "services:\n  nginx:\n    image: nginx:latest\n    restart: on-failure\n    volumes:\n      - ${PWD}/nginx.conf:/etc/nginx/ngi"
  },
  {
    "path": "docker/nginx.conf",
    "chars": 137,
    "preview": "user  nginx;\n\nevents {\n    worker_connections   1000;\n}\n\nstream {\n    server {\n       listen 3320;\n       proxy_pass olr"
  },
  {
    "path": "docker/olric-server-consul.yaml",
    "chars": 8759,
    "preview": "server:\n  # BindAddr denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindAdd"
  },
  {
    "path": "embedded_client.go",
    "chars": 12374,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "embedded_client_test.go",
    "chars": 17039,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "embedded_iterator.go",
    "chars": 2715,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "embedded_iterator_test.go",
    "chars": 2063,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "events/cluster_events.go",
    "chars": 5616,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "events/cluster_events_test.go",
    "chars": 2967,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "get_response.go",
    "chars": 3495,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "get_response_test.go",
    "chars": 9266,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "go.mod",
    "chars": 1636,
    "preview": "module github.com/olric-data/olric\n\ngo 1.23.0\n\nrequire (\n\tgithub.com/RoaringBitmap/roaring v1.9.4\n\tgithub.com/buraksezer"
  },
  {
    "path": "go.sum",
    "chars": 21479,
    "preview": "cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/DataDog/datadog-go v3.2.0+"
  },
  {
    "path": "hasher/hasher.go",
    "chars": 1305,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "integration_test.go",
    "chars": 13776,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/bufpool/bufpool.go",
    "chars": 1204,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/bufpool/bufpool_test.go",
    "chars": 893,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/checkpoint/checkpoint.go",
    "chars": 848,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/checkpoint/checkpoint_test.go",
    "chars": 995,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/balancer/balancer.go",
    "chars": 5945,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/balancer/balancer_test.go",
    "chars": 7509,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/fragment.go",
    "chars": 903,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/hkey.go",
    "chars": 933,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/hkey_test.go",
    "chars": 924,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/partition.go",
    "chars": 2172,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/partition_test.go",
    "chars": 2049,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/partitions.go",
    "chars": 2213,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/partitions/partitions_test.go",
    "chars": 2356,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/callback.go",
    "chars": 979,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/callback_test.go",
    "chars": 1128,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/discovery.go",
    "chars": 2752,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/discovery_test.go",
    "chars": 1778,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/distribute.go",
    "chars": 6838,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/distribute_test.go",
    "chars": 2193,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/events.go",
    "chars": 1966,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/events_test.go",
    "chars": 2864,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/handlers.go",
    "chars": 919,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/left_over_data.go",
    "chars": 1982,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/left_over_data_test.go",
    "chars": 2041,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/members.go",
    "chars": 1541,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/members_test.go",
    "chars": 2202,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/operations.go",
    "chars": 3732,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/routingtable.go",
    "chars": 12742,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/routingtable_test.go",
    "chars": 9080,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/cluster/routingtable/update.go",
    "chars": 2949,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/discovery/delegate.go",
    "chars": 1695,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/discovery/discovery.go",
    "chars": 8812,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/discovery/discovery_test.go",
    "chars": 6211,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/discovery/events.go",
    "chars": 1657,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/discovery/member.go",
    "chars": 2223,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/discovery/member_test.go",
    "chars": 1867,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/atomic.go",
    "chars": 5307,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/atomic_handlers.go",
    "chars": 2889,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/atomic_test.go",
    "chars": 9961,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/balance.go",
    "chars": 4368,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/balance_test.go",
    "chars": 6148,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/compaction.go",
    "chars": 2762,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/compaction_test.go",
    "chars": 2609,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/config.go",
    "chars": 2401,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/config_test.go",
    "chars": 2583,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/delete.go",
    "chars": 4840,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/delete_handlers.go",
    "chars": 1768,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/delete_test.go",
    "chars": 8840,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/destroy.go",
    "chars": 2296,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/destroy_handlers.go",
    "chars": 2340,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/destroy_test.go",
    "chars": 3191,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/dmap.go",
    "chars": 3624,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/dmap_test.go",
    "chars": 970,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/env.go",
    "chars": 1159,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/eviction.go",
    "chars": 6849,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/eviction_test.go",
    "chars": 5484,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/expire.go",
    "chars": 1016,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/expire_handlers.go",
    "chars": 1885,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/expire_test.go",
    "chars": 3022,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/fragment.go",
    "chars": 4042,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/fragment_test.go",
    "chars": 3131,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/get.go",
    "chars": 11763,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/get_handlers.go",
    "chars": 1995,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/get_test.go",
    "chars": 5938,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/handlers.go",
    "chars": 2220,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/janitor.go",
    "chars": 2500,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/lock.go",
    "chars": 6014,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/lock_handlers.go",
    "chars": 3369,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/lock_test.go",
    "chars": 10893,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/put.go",
    "chars": 11290,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/put_handlers.go",
    "chars": 2424,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/put_test.go",
    "chars": 9627,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/scan_handlers.go",
    "chars": 3078,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/scan_test.go",
    "chars": 4955,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/service.go",
    "chars": 4263,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/service_test.go",
    "chars": 1041,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/dmap/stats_test.go",
    "chars": 2432,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/environment/environment.go",
    "chars": 1206,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/environment/environment_test.go",
    "chars": 1251,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/locker/locker.go",
    "chars": 2754,
    "preview": "/*\nPackage locker provides a mechanism for creating finer-grained locking to help\nfree up more global locks to handle ot"
  },
  {
    "path": "internal/locker/locker_test.go",
    "chars": 2695,
    "preview": "package locker\n\nimport (\n\t\"math/rand\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestLockCounter(t *testing.T) {\n\tl :"
  },
  {
    "path": "internal/protocol/cluster.go",
    "chars": 1651,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/cluster_test.go",
    "chars": 1518,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/commands.go",
    "chars": 2754,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/dmap.go",
    "chars": 19962,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/dmap_test.go",
    "chars": 16700,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/errors.go",
    "chars": 2508,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/errors_test.go",
    "chars": 1624,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/pubsub.go",
    "chars": 5537,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/pubsub_test.go",
    "chars": 2785,
    "preview": "package protocol\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestProtocol_ParsePubli"
  },
  {
    "path": "internal/protocol/system.go",
    "chars": 5363,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/protocol/system_test.go",
    "chars": 3610,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/pubsub/handlers.go",
    "chars": 3841,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/pubsub/handlers_test.go",
    "chars": 10922,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/pubsub/pubsub.go",
    "chars": 10733,
    "preview": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Josh Baker\n//\n// Permission is hereby granted, free of charge, to any "
  },
  {
    "path": "internal/pubsub/pubsub_test.go",
    "chars": 5464,
    "preview": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Josh Baker\n//\n// Permission is hereby granted, free of charge, to any "
  },
  {
    "path": "internal/pubsub/service.go",
    "chars": 3363,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/compaction.go",
    "chars": 2613,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/compaction_test.go",
    "chars": 2968,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/entry/entry.go",
    "chars": 3034,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/entry/entry_test.go",
    "chars": 3637,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/ramblock.go",
    "chars": 13447,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/ramblock_test.go",
    "chars": 25967,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/table/pack.go",
    "chars": 2281,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/table/pack_test.go",
    "chars": 3517,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/table/table.go",
    "chars": 16231,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/table/table_test.go",
    "chars": 13500,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/ramblock/transport.go",
    "chars": 1848,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/resp/encoder.go",
    "chars": 3426,
    "preview": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in sou"
  },
  {
    "path": "internal/resp/encoder_test.go",
    "chars": 5994,
    "preview": "package resp\n\nimport (\n\t\"bytes\"\n\t\"encoding\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype My"
  },
  {
    "path": "internal/resp/scan.go",
    "chars": 3545,
    "preview": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in sou"
  },
  {
    "path": "internal/roundrobin/round_robin.go",
    "chars": 2342,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/roundrobin/round_robin_test.go",
    "chars": 2234,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/client.go",
    "chars": 3101,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/client_test.go",
    "chars": 3707,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/handler.go",
    "chars": 2382,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/handler_test.go",
    "chars": 2731,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/mux.go",
    "chars": 3395,
    "preview": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Josh Baker\n//\n// Permission is hereby granted, free of charge, to any "
  },
  {
    "path": "internal/server/mux_test.go",
    "chars": 1455,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/server.go",
    "chars": 7892,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/server/server_test.go",
    "chars": 2956,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/service/service.go",
    "chars": 739,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/stats/stats.go",
    "chars": 2056,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/stats/stats_test.go",
    "chars": 1263,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/testcluster/testcluster.go",
    "chars": 4937,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/testutil/mockfragment/mockfragment.go",
    "chars": 2614,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/testutil/testutil.go",
    "chars": 2831,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "internal/util/safe.go",
    "chars": 1554,
    "preview": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in sou"
  },
  {
    "path": "internal/util/strconv.go",
    "chars": 1857,
    "preview": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in sou"
  },
  {
    "path": "internal/util/unsafe.go",
    "chars": 1765,
    "preview": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in sou"
  },
  {
    "path": "olric-server-docker.yaml",
    "chars": 9186,
    "preview": "server:\n  # BindAddr denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindAdd"
  },
  {
    "path": "olric.go",
    "chars": 14197,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "olric_test.go",
    "chars": 3535,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "ping.go",
    "chars": 1431,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "ping_test.go",
    "chars": 1223,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pipeline.go",
    "chars": 18745,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pipeline_test.go",
    "chars": 11676,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/flog/flog.go",
    "chars": 4347,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/neterrors/errors.go",
    "chars": 920,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/service_discovery/service_discovery.go",
    "chars": 1898,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/storage/config.go",
    "chars": 1872,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/storage/config_test.go",
    "chars": 1594,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/storage/engine.go",
    "chars": 5090,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/storage/entry.go",
    "chars": 1510,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pkg/storage/stats.go",
    "chars": 1110,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pubsub.go",
    "chars": 2079,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "pubsub_test.go",
    "chars": 6515,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "stats/stats.go",
    "chars": 6785,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  },
  {
    "path": "stats/stats_test.go",
    "chars": 870,
    "preview": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you ma"
  }
]

// ... and 2 more files (download for full content)

About this extraction

This page contains the full source code of the buraksezer/olricdb GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 202 files (981.6 KB), approximately 279.8k tokens, and a symbol index with 1,519 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!